Dataset columns (type and min/max length or value):
- file_id: string, length 5 to 9
- content: string, length 100 to 5.25M
- local_path: string, length 66 to 70
- kaggle_dataset_name: string, length 3 to 50
- kaggle_dataset_owner: string, length 3 to 20
- kversion: string, length 497 to 763
- kversion_datasetsources: string, length 71 to 5.46k
- dataset_versions: string, length 338 to 235k
- datasets: string, length 334 to 371
- users: string, length 111 to 264
- script: string, length 100 to 5.25M
- df_info: string, length 0 to 4.87M
- has_data_info: bool, 2 classes
- nb_filenames: int64, 0 to 370
- retreived_data_description: string, length 0 to 4.44M
- script_nb_tokens: int64, 25 to 663k
- upvotes: int64, 0 to 1.65k
- tokens_description: int64, 25 to 663k
- tokens_script: int64, 25 to 663k
129250852
import cv2 import numpy as np import torch from torch import nn import onnx import onnxruntime def L2NormDim(input, epsilon=1e-12): squared = input.pow(2) summed = squared.sum(dim=1, keepdim=True) sqrt_sum = summed.sqrt() return sqrt_sum # def expand_as(tensor, other_tensor, dim): # # get the original shape # # replace the shape along the specified dim with the size of other_tensor # size = other_tensor.shape[dim] # # repeat the tensor along the specified dim # repeated_tensor = tensor.repeat(1,size,1,1) # return repeated_tensor def expand_as(input, target, dim): # 计算需要重复的次数 repeats = [1] * len(target.shape) repeats[dim] = target.shape[dim] // input.shape[dim] # 在第二维度上进行重复 repeated_tensor = torch.cat([input] * repeats[dim], dim=1) # 裁剪到正确的形状 return repeated_tensor[:, : target.shape[dim], ...] def simple_nms(scores, nms_radius: int): """Fast Non-maximum suppression to remove nearby points""" zeros = torch.zeros_like(scores) max_mask = scores == torch.nn.functional.max_pool2d( scores, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius ) # for _ in range(2): # supp_mask = torch.nn.functional.max_pool2d(max_mask.float(), kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius) > 0 # supp_scores = torch.where(supp_mask, zeros, scores) # new_max_mask = supp_scores == torch.nn.functional.max_pool2d(supp_scores, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius) # max_mask = max_mask | (new_max_mask & (~supp_mask)) return torch.where(max_mask, scores, zeros) def sample_descriptors(keypoints, descriptors, s: int): """Interpolate descriptors at keypoint locations""" b, c, h, w = descriptors.shape keypoints = keypoints - s / 2 + 0.5 tmp = torch.tensor([w * s - s / 2 - 0.5, h * s - s / 2 - 0.5]).to(keypoints)[None] keypoints = keypoints / tmp keypoints = keypoints * 2 - 1 # normalize to (-1, 1) descriptors = torch.nn.functional.grid_sample( descriptors, keypoints.view(b, 1, -1, 2), mode="bilinear", align_corners=True ) descriptors = descriptors.reshape(b, c, -1) norm = descriptors.norm(dim=1, keepdim=True).expand_as(descriptors) descriptors = descriptors / norm return descriptors.reshape(c, -1).t() class SmallSuperPoint_Interpolate(nn.Module): def __init__(self): super().__init__() self.relu = nn.ReLU(inplace=True) self.pool = nn.MaxPool2d(kernel_size=2, stride=2) # c1, c2, c3, c4, c5 = 64, 64, 128, 128, 256 c1, c2, c3, c4, c5, d1 = 32, 64, 64, 128, 128, 128 self.conv1a = nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1) conv1b_depth = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1, groups=c1) conv1b_point = nn.Conv2d(c1, c1, kernel_size=1) self.conv1b = nn.Sequential(conv1b_depth, conv1b_point) conv2a_depth = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1, groups=c1) conv2a_point = nn.Conv2d(c1, c2, kernel_size=1) self.conv2a = nn.Sequential(conv2a_depth, conv2a_point) conv2b_depth = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1, groups=c2) conv2b_point = nn.Conv2d(c2, c2, kernel_size=1) self.conv2b = nn.Sequential(conv2b_depth, conv2b_point) conv3a_depth = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1, groups=c2) conv3a_point = nn.Conv2d(c2, c3, kernel_size=1) self.conv3a = nn.Sequential(conv3a_depth, conv3a_point) conv3b_depth = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1, groups=c3) conv3b_point = nn.Conv2d(c3, c3, kernel_size=1) self.conv3b = nn.Sequential(conv3b_depth, conv3b_point) conv4a_depth = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1, groups=c3) conv4a_point = nn.Conv2d(c3, c4, kernel_size=1) self.conv4a = 
nn.Sequential(conv4a_depth, conv4a_point) conv4b_depth = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1, groups=c4) conv4b_point = nn.Conv2d(c4, c4, kernel_size=1) self.conv4b = nn.Sequential(conv4b_depth, conv4b_point) convPa_depth = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1, groups=c4) convPb_point = nn.Conv2d(c4, c5, kernel_size=1) self.convPa = nn.Sequential(convPa_depth, convPb_point) self.convPb = nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0) convDa_depth = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1, groups=c4) convDb_point = nn.Conv2d(c4, c5, kernel_size=1) self.convDa = nn.Sequential(convDa_depth, convDb_point) self.convDb = nn.Conv2d(c5, d1, kernel_size=1, stride=1, padding=0) def forward(self, image): """Compute keypoints, scores, descriptors for image""" # Shared Encoder x = self.relu(self.conv1a(image)) x = self.relu(self.conv1b(x)) x = self.pool(x) x = self.relu(self.conv2a(x)) x = self.relu(self.conv2b(x)) x = self.pool(x) x = self.relu(self.conv3a(x)) x = self.relu(self.conv3b(x)) x = self.pool(x) x = self.relu(self.conv4a(x)) x = self.relu(self.conv4b(x)) # Compute the dense keypoint scores cPa = self.relu(self.convPa(x)) scores = self.convPb(cPa) scores = torch.nn.functional.softmax(scores, 1)[:, :64, :, :] b, _, h, w = scores.shape # scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, 8, 8) # scores = scores.permute(0, 1, 3, 2, 4).reshape(b, 1, h*8, w*8) scores = scores.permute(0, 2, 3, 1).reshape(b * h, w, 8, 8) scores = scores.permute(0, 2, 1, 3).reshape(b, 1, h * 8, w * 8) # scores = simple_nms(scores, 4).reshape(h*8, w*8) # scores = simple_nms(scores, 4).reshape(-1) # # Extract keypoints # index_1d = torch.arange(0, h*8*w*8).to(scores) # keypoints = torch.stack([index_1d // (w*8), index_1d % (w*8)], dim=1) # Discard keypoints near the image borders # keypoints, scores = remove_borders(keypoints, scores, 4, h*8, w*8) # Keep the k keypoints with highest score # scores, indices = torch.topk(scores, 400) # keypoints = keypoints[indices] # # Convert (h, w) to (x, y) # keypoints = torch.flip(keypoints, [1]).float() # Compute the dense descriptors cDa = self.relu(self.convDa(x)) descriptors = self.convDb(cDa) # descriptors = torch.nn.functional.normalize(descriptors, dim=1, eps=1e-6) # breakpoint() # norm = descriptors.norm(dim=1, keepdim=True).expand_as(descriptors) t = L2NormDim(descriptors) norm = expand_as(t, descriptors, 1) descriptors = descriptors / norm # # Extract descriptors # descriptors = sample_descriptors_interpolate(keypoints, descriptors, 8) return descriptors, scores filename = "/kaggle/input/img-dataset/test_data/21000.jpg" def resize_image_cuda(img, resize=(640, 400)): h, w, _ = img.shape scale_w = resize[0] / w scale_h = resize[1] / h img = cv2.resize(img, resize) img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) img = torch.from_numpy(img)[None][None] / 255.0 return img.cuda(), (scale_w, scale_h) def resize_image_numpy(img, resize=(640, 400)): h, w, _ = img.shape scale_w = resize[0] / w scale_h = resize[1] / h img = cv2.resize(img, resize) img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) img = img[None][None] / 255.0 return img, (scale_w, scale_h) device = torch.device("cuda") superpoint_onnx = SmallSuperPoint_Interpolate() superpoint_onnx.load_state_dict( torch.load( "/kaggle/input/model-weight/small_superPointNet_ws_10000_checkpoint.pth.tar" )["model_state_dict"] ) superpoint_onnx = superpoint_onnx.eval().to(device) img = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB) superpoint_input, _ = resize_image(img) with 
torch.no_grad(): # import pdb;pdb.set_trace() kpts0, des0, score0 = superpoint_onnx(torch.from_numpy(superpoint_input).to("cpu")) def export_onnx_new(): device = torch.device("cuda") superpoint_onnx = SmallSuperPoint_Interpolate() superpoint_onnx.load_state_dict( torch.load( "/kaggle/input/model-weight/small_superPointNet_ws_10000_checkpoint.pth.tar" )["model_state_dict"] ) superpoint_onnx = superpoint_onnx.eval().to(device) img = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB) superpoint_input, _ = resize_image_cuda(img) # export to onnx torch.onnx.export( superpoint_onnx, # model being run superpoint_input, # model input (or a tuple for multiple inputs) "my_model_new.onnx", # where to save the model (can be a file or file-like object) export_params=True, # store the trained parameter weights inside the model file opset_version=11, input_names=["image"], # the model's input names output_names=["descriptors", "scores"], # the model's output names ) export_onnx_new() class SuperPointONNX(nn.Module): """SuperPoint Convolutional Detector and Descriptor SuperPoint: Self-Supervised Interest Point Detection and Description. Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. In CVPRW, 2019. https://arxiv.org/abs/1712.07629 """ def __init__(self): super().__init__() # self.config = {**self.default_config} self.relu = nn.ReLU(inplace=True) self.pool = nn.MaxPool2d(kernel_size=2, stride=2) c1, c2, c3, c4, c5 = 64, 64, 128, 128, 256 self.conv1a = nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1) self.conv1b = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1) self.conv2a = nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1) self.conv2b = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1) self.conv3a = nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1) self.conv3b = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1) self.conv4a = nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1) self.conv4b = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1) self.convPa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) self.convPb = nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0) self.convDa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) self.convDb = nn.Conv2d(c5, 256, kernel_size=1, stride=1, padding=0) def forward(self, image): """Compute keypoints, scores, descriptors for image""" # Shared Encoder x = self.relu(self.conv1a(image)) x = self.relu(self.conv1b(x)) x = self.pool(x) x = self.relu(self.conv2a(x)) x = self.relu(self.conv2b(x)) x = self.pool(x) x = self.relu(self.conv3a(x)) x = self.relu(self.conv3b(x)) x = self.pool(x) x = self.relu(self.conv4a(x)) x = self.relu(self.conv4b(x)) # Compute the dense keypoint scores cPa = self.relu(self.convPa(x)) scores = self.convPb(cPa) scores = torch.nn.functional.softmax(scores, 1)[:, :64, :, :] b, _, h, w = scores.shape scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, 8, 8) scores = scores.permute(0, 1, 3, 2, 4).reshape(b, 1, h * 8, w * 8) # scores = simple_nms(scores, 4).reshape(h*8, w*8) scores = simple_nms(scores, 4).reshape(-1) # Extract keypoints index_1d = torch.arange(0, h * 8 * w * 8).to(scores) keypoints = torch.stack([index_1d // (w * 8), index_1d % (w * 8)], dim=1) # Discard keypoints near the image borders # keypoints, scores = remove_borders(keypoints, scores, 4, h*8, w*8) # import pdb;pdb.set_trace() # scores=scores*10**6 torch.set_printoptions(precision=8) scores, indices = torch.topk(scores, 400) keypoints = keypoints[indices] # Convert (h, w) to (x, y) keypoints = torch.flip(keypoints, 
[1]).float() # Compute the dense descriptors cDa = self.relu(self.convDa(x)) descriptors = self.convDb(cDa) # descriptors = torch.nn.functional.normalize(descriptors, dim=1, eps=1e-6) norm = descriptors.norm(dim=1, keepdim=True).expand_as(descriptors) descriptors = descriptors / norm # Extract descriptors descriptors = sample_descriptors(keypoints[None], descriptors, 8) return keypoints, descriptors, scores def export_onnx_big(): device = torch.device("cuda") superpoint_onnx = SuperPointONNX() superpoint_onnx.load_state_dict( torch.load( "/kaggle/input/model-weight/superPointNet_finedtuned_40000_checkpoint.pth.tar" )["model_state_dict"] ) superpoint_onnx = superpoint_onnx.eval().to(device) img = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB) superpoint_input, _ = resize_image_cuda(img) # export to onnx torch.onnx.export( superpoint_onnx, # model being run superpoint_input, # model input (or a tuple for multiple inputs) "superpoint_big_40000.onnx", # where to save the model (can be a file or file-like object) export_params=True, # store the trained parameter weights inside the model file opset_version=16, input_names=["image"], # the model's input names output_names=["keypoints", "descriptors", "scores"], # the model's output names ) export_onnx_big()
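The notebook imports onnx and onnxruntime but never exercises the exported graph, and the in-line sanity check above calls an undefined resize_image and unpacks three outputs from a model that returns two. Below is a minimal validation sketch: the file name, the "image" input name, and the output names come from the export call above, while the (1, 1, 400, 640) dummy input shape and the tolerances are assumptions.

```python
# Minimal sketch: structurally check the exported graph, run it through
# onnxruntime, and compare against the eager PyTorch model on a random input.
# The dummy shape (1, 1, 400, 640) mirrors resize_image_cuda's (640, 400)
# target size; shape and tolerances are assumptions, not values from the run.
import numpy as np
import onnx
import onnxruntime as ort

onnx.checker.check_model(onnx.load("my_model_new.onnx"))

sess = ort.InferenceSession("my_model_new.onnx", providers=["CPUExecutionProvider"])
dummy = np.random.rand(1, 1, 400, 640).astype(np.float32)
ort_desc, ort_scores = sess.run(None, {"image": dummy})

model_cpu = SmallSuperPoint_Interpolate().eval()
model_cpu.load_state_dict(
    torch.load(
        "/kaggle/input/model-weight/small_superPointNet_ws_10000_checkpoint.pth.tar",
        map_location="cpu",
    )["model_state_dict"]
)
with torch.no_grad():
    ref_desc, ref_scores = model_cpu(torch.from_numpy(dummy))

np.testing.assert_allclose(ref_scores.numpy(), ort_scores, rtol=1e-3, atol=1e-5)
np.testing.assert_allclose(ref_desc.numpy(), ort_desc, rtol=1e-3, atol=1e-5)
print("ONNX and PyTorch outputs match:", ort_desc.shape, ort_scores.shape)
```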
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/250/129250852.ipynb
null
null
[{"Id": 129250852, "ScriptId": 38390410, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1896085, "CreationDate": "05/12/2023 07:26:48", "VersionNumber": 2.0, "Title": "fixed_SmallSuperPoint_Interpolate", "EvaluationDate": NaN, "IsChange": true, "TotalLines": 349.0, "LinesInsertedFromPrevious": 243.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 106.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
4,795
0
4,795
4,795
129250702
<jupyter_start><jupyter_text>Robot Sar Kaggle dataset identifier: robot-sar <jupyter_script>from tensorflow import keras import keras import numpy as np from matplotlib import pyplot as plt import seaborn as sns from sklearn import metrics import os dataset = "/kaggle/input/robot-sar/SAR-robot-dataset" classes = [x for x in os.listdir(dataset)] classes image_gen = keras.preprocessing.image.ImageDataGenerator( horizontal_flip=True, fill_mode="nearest", shear_range=0.2, zoom_range=0.2, # rescale=1./255, validation_split=0.25, preprocessing_function=keras.applications.inception_v3.preprocess_input, ) test_gen = keras.preprocessing.image.ImageDataGenerator( # rescale=1/255., validation_split=0.15, preprocessing_function=keras.applications.inception_v3.preprocess_input, ) data_train = image_gen.flow_from_directory( dataset, target_size=(256, 192), batch_size=5, color_mode="rgb", classes=classes, subset="training", class_mode="sparse", seed=802, ) data_val = image_gen.flow_from_directory( dataset, target_size=(256, 192), batch_size=5, color_mode="rgb", classes=classes, subset="validation", class_mode="sparse", seed=802, ) data_test = test_gen.flow_from_directory( dataset, target_size=(256, 192), batch_size=5, color_mode="rgb", classes=classes, shuffle=True, class_mode="sparse", seed=404, subset="validation", ) model = keras.applications.InceptionV3( include_top=True, weights=None, input_tensor=keras.layers.Input(shape=(255, 192, 3)), pooling="max", classes=len(classes), classifier_activation="softmax", ) model.summary() model.compile( loss="sparse_categorical_crossentropy", metrics="accuracy", optimizer=keras.optimizers.RMSprop(learning_rate=0.00001), ) H = model.fit( data_train, epochs=50, validation_data=data_val, validation_steps=data_val.n // data_val.batch_size, steps_per_epoch=data_train.n // data_train.batch_size, batch_size=5, ) x = np.concatenate([data_test.next()[0] for i in range(data_test.__len__())]) y = np.concatenate([data_test.next()[1] for i in range(data_test.__len__())]) print(x.shape) print(y.shape) prediction = model.predict(data_test, batch_size=4) y_pred = np.argmax(prediction, axis=1) accuracy = model.evaluate(data_test, batch_size=4, return_dict=False)[1] * 100 print(metrics.classification_report(y, y_pred)) mat = metrics.confusion_matrix(y, y_pred) mat sns.heatmap(mat, annot=True, fmt=".1f") model.save("/kaggle/working/SAR-terrain-v2.h5")
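The notebook imports matplotlib and keeps the fit history in H but never plots it. A minimal sketch, assuming the standard Keras history keys produced by the metrics="accuracy" setting above:

```python
# Minimal sketch: plot the loss and accuracy curves stored in H.history.
# Key names ("loss", "val_loss", "accuracy", "val_accuracy") are the defaults
# Keras produces for the compile/fit settings used above.
fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(12, 4))
ax_loss.plot(H.history["loss"], label="train")
ax_loss.plot(H.history["val_loss"], label="validation")
ax_loss.set_title("loss")
ax_loss.legend()
ax_acc.plot(H.history["accuracy"], label="train")
ax_acc.plot(H.history["val_accuracy"], label="validation")
ax_acc.set_title("accuracy")
ax_acc.legend()
plt.show()
```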
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/250/129250702.ipynb
robot-sar
aminidwipuspitasari
[{"Id": 129250702, "ScriptId": 37121029, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9194413, "CreationDate": "05/12/2023 07:25:34", "VersionNumber": 7.0, "Title": "InceptionNetv3-to-MobileNetV2-SAR", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 105.0, "LinesInsertedFromPrevious": 12.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 93.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185130004, "KernelVersionId": 129250702, "SourceDatasetVersionId": 5667199}]
[{"Id": 5667199, "DatasetId": 3257616, "DatasourceVersionId": 5742682, "CreatorUserId": 9194413, "LicenseName": "Unknown", "CreationDate": "05/12/2023 04:34:19", "VersionNumber": 1.0, "Title": "Robot Sar", "Slug": "robot-sar", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3257616, "CreatorUserId": 9194413, "OwnerUserId": 9194413.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5667199.0, "CurrentDatasourceVersionId": 5742682.0, "ForumId": 3323128, "Type": 2, "CreationDate": "05/12/2023 04:34:19", "LastActivityDate": "05/12/2023", "TotalViews": 50, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 9194413, "UserName": "aminidwipuspitasari", "DisplayName": "Amini Dwi Puspita Sari", "RegisterDate": "12/17/2021", "PerformanceTier": 0}]
false
0
832
0
851
832
129250711
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re from scipy.sparse import hstack, coo_matrix import nltk from nltk.stem.snowball import SnowballStemmer from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords from nltk.tokenize import word_tokenize, sent_tokenize from nltk import pos_tag from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) df_train = pd.read_csv("../input/nlp-getting-started/train.csv") df_train.tail(10) text = df_train["text"].values.tolist() line_curr = text[7609] print(line_curr) tokens = nltk.word_tokenize(line_curr) print(tokens) pos = nltk.pos_tag(tokens) # print(pos) stemmer = SnowballStemmer("english") for tok in tokens: stem = stemmer.stem(tok) print(stem) text1 = [] for t in text: tokens = nltk.word_tokenize(t.lower()) s = "" for token in tokens: stem = stemmer.stem(token) s += stem + " " text1.append(s) text = text1 # ### Expressions regulières (REGEX) line = "We always try to bring the heavy. #metal #RT http://t.co/YAo1e0xngw in the show. Here you are the link : http://www.t.co/YAo1e0xngw." pattern_url = r"http(s)?[^\s\n\t\r]+" pattern_author = r"@\w+" if re.search(pattern_url, line): p = re.compile(pattern_url) l = p.sub("URL", line) print(l) urls = re.findall(pattern_url, line) print(urls) for url in urls: print(url) # ### Stopwords stop_words = stopwords.words("english") print(stop_words) cv = CountVectorizer( stop_words=stop_words, min_df=10, token_pattern=r"(?u)\b[A-Za-z]{2,}\b" ) out = cv.fit_transform(text) voc = cv.get_feature_names_out() print(voc) print(len(voc)) X = out.toarray() X.shape Y = df_train["target"] print(Y.shape, type(Y)) YY = df_train.loc[:, ["target"]] print(YY.shape) pred = clf.predict(X) print(voc[0:30]) for w in voc[0:30]: if w.isalpha(): print(w, "alpha") print(out) # ### Tokenisation line_curr = line_curr.lower() tokens = nltk.word_tokenize(line_curr) print(tokens) pos = nltk.pos_tag(tokens) # print(pos) tokens = [word.lower() for word in tokens if word not in stop_words] print(tokens) Line = " ".join(tokens) print(line) for word in pos: if "NN" in word[1] or "JJ" in word[1]: print(word[0]) # ### Stemmatisation stemmer = SnowballStemmer("english") for tok in tokens: stem = stemmer.stem(tok) print(stem)
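The cell above calls clf.predict(X) without ever defining clf. A minimal sketch that gives the name something to resolve against, assuming a plain scikit-learn LogisticRegression as a placeholder; the original notebook never says which classifier it intended.

```python
# Minimal sketch: fit a placeholder classifier on the CountVectorizer output
# so that clf.predict(X) has a model behind it. LogisticRegression is an
# assumption, not the notebook's stated choice.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

X_tr, X_val, y_tr, y_val = train_test_split(out, Y, test_size=0.2, random_state=0)
clf = LogisticRegression(max_iter=1000)
clf.fit(X_tr, y_tr)
print("validation accuracy:", accuracy_score(y_val, clf.predict(X_val)))
pred = clf.predict(X)  # X is the dense array built above; the sparse `out` would also work
```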
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/250/129250711.ipynb
null
null
[{"Id": 129250711, "ScriptId": 37233868, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3616012, "CreationDate": "05/12/2023 07:25:37", "VersionNumber": 3.0, "Title": "GEMA_M2_NLP_PreTraitement", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 128.0, "LinesInsertedFromPrevious": 72.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 56.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
903
0
903
903
129250155
# ## Introduction # Prediction of Wild Blueberry Yield (Kaggle Competition) # Features Unit Description: # - Clonesize m2 The average blueberry clone size in the field # - Honeybee bees/m2/min Honeybee density in the field # - Bumbles bees/m2/min Bumblebee density in the field # - Andrena bees/m2/min Andrena bee density in the field # - Osmia bees/m2/min Osmia bee density in the field # - MaxOfUpperTRange ℃ The highest record of the upper band daily air temperature during the bloom season # - MinOfUpperTRange ℃ The lowest record of the upper band daily air temperature # - AverageOfUpperTRange ℃ The average of the upper band daily air temperature # - MaxOfLowerTRange ℃ The highest record of the lower band daily air temperature # - MinOfLowerTRange ℃ The lowest record of the lower band daily air temperature # - AverageOfLowerTRange ℃ The average of the lower band daily air temperature # - RainingDays Day The total number of days during the bloom season, each of which has precipitation larger than zero # - AverageRainingDays Day The average of raining days of the entire bloom season # Goal: # - The submission requires us to predict blueberry yield for 10194 id in the test.csv # ## Getting Started # #### Import libraries import warnings warnings.filterwarnings("ignore") # Import Neccessary libraries import numpy as np import pandas as pd # Import Visualization libraries import matplotlib.pyplot as plt import seaborn as sns sns.set() import plotly.express as px # Import Statistics libraries from scipy import stats from scipy.stats import norm # #### Import dataset data_train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv") data_test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv") data_train.head() data_train.shape print(data_train.info()) # #### Preprocess the data def preprocess(df): df = df.drop(["id"], axis=1) # drop id column df["fruit_seed"] = df["fruitset"] * df["seeds"] # calculate total seeds in a tree return df from sklearn.model_selection import train_test_split X = preprocess(data_train) X_test_full = preprocess(data_test) # Remove rows with missing target, separate target from predictors X.dropna(axis=0, subset=["yield"], inplace=True) y = X["yield"] X.drop(["yield"], axis=1, inplace=True) # Break off validation set from training data X_train_full, X_valid_full, y_train, y_valid = train_test_split( X, y, train_size=0.8, test_size=0.2, random_state=0 ) numeric_cols = [ cname for cname in X_train_full.columns if X_train_full[cname].dtype in ["int64", "float64"] ] """ # Select categorical columns with relatively low cardinality (convenient but arbitrary) low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == "object"] """ # Select numeric columns numeric_cols = [ cname for cname in X_train_full.columns if X_train_full[cname].dtype in ["int64", "float64"] ] # Keep selected columns only my_cols = numeric_cols # + low_cardinality_cols X_train = X_train_full[my_cols].copy() X_valid = X_valid_full[my_cols].copy() X_test = X_test_full[my_cols].copy() """ # One-hot encode the data (to shorten the code, we use pandas) `X_train = pd.get_dummies(X_train) X_valid = pd.get_dummies(X_valid) X_test = pd.get_dummies(X_test) """ # Making sure all columns are aligned by dropping columns that are not in X_train X_train, X_valid = X_train.align(X_valid, join="left", axis=1) X_train, X_test = X_train.align(X_test, join="left", axis=1) # ## Build model from xgboost import XGBRegressor from 
sklearn.metrics import mean_absolute_error # Baseline model my_model_1 = XGBRegressor(random_state=0) my_model_1.fit(X_train, y_train) predictions_1 = my_model_1.predict(X_valid) mae_1 = mean_absolute_error(predictions_1, y_valid) print("Mean Absolute Error:", mae_1) # Model 2 my_model_2 = XGBRegressor(n_estimators=1000, learning_rate=0.05) my_model_2.fit(X_train, y_train) predictions_2 = my_model_2.predict(X_valid) mae_2 = mean_absolute_error(predictions_2, y_valid) print("Mean Absolute Error:", mae_2) # Model 3 my_model_3 = XGBRegressor(n_estimators=2000, learning_rate=0.05) my_model_3.fit(X_train, y_train) predictions_3 = my_model_3.predict(X_valid) mae_3 = mean_absolute_error(predictions_3, y_valid) print("Mean Absolute Error:", mae_2) # Model 4 my_model_4 = XGBRegressor(n_estimators=1000, learning_rate=0.005) my_model_4.fit(X_train, y_train) predictions_4 = my_model_4.predict(X_valid) mae_4 = mean_absolute_error(predictions_4, y_valid) print("Mean Absolute Error:", mae_4) # Choose model 4 final_prediction = my_model_4.predict(X_test) output = pd.DataFrame({"id": data_test.id, "yield": final_prediction}) print(output) output.to_csv("submission.csv", index=False) print("Your submission was successfully saved!")
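The four XGBoost variants above are compared with copy-pasted blocks, and the model 3 printout accidentally reuses mae_2. The same comparison written as a loop is less error-prone; a minimal sketch reusing the split and imports already defined above:

```python
# Minimal sketch: evaluate the same four hyperparameter settings in a loop and
# report the lowest validation MAE, avoiding the copy-paste printout slip.
candidates = {
    "baseline": dict(random_state=0),
    "n1000_lr0.05": dict(n_estimators=1000, learning_rate=0.05),
    "n2000_lr0.05": dict(n_estimators=2000, learning_rate=0.05),
    "n1000_lr0.005": dict(n_estimators=1000, learning_rate=0.005),
}
maes = {}
for name, params in candidates.items():
    model = XGBRegressor(**params)
    model.fit(X_train, y_train)
    maes[name] = mean_absolute_error(y_valid, model.predict(X_valid))
    print(name, "MAE:", maes[name])
best = min(maes, key=maes.get)
print("lowest validation MAE:", best, maes[best])
```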
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/250/129250155.ipynb
null
null
[{"Id": 129250155, "ScriptId": 38377831, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11919322, "CreationDate": "05/12/2023 07:19:33", "VersionNumber": 1.0, "Title": "\ud83c\udf47 Prediction of Wild Blueberry Yield", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 142.0, "LinesInsertedFromPrevious": 142.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,592
0
1,592
1,592
129250681
<jupyter_start><jupyter_text>DATA WP5 Variabilite RunResults Each file of this dataset correspond to a single run of a training process and contains the related forgetscore computed during this process. Forgetscore's after training with : - ` 50 ` epochs - ` 1e-4` learning rate - ` 1024 ` batch size - ` resNet ` based model [Research paper](https://arxiv.org/pdf/1812.05159.pdf) on the topic Used to show stability and reliability of the dataset Kaggle dataset identifier: wp5-variabilite-runresult <jupyter_script>import os import re import sys import torch import numpy as np import matplotlib.cm as cm import matplotlib.pyplot as plt # Add to path the current lib sys.path.insert(1, "/kaggle/input/data-wp5-cifar10-context") # Load dataset DATA = torch.stack( [ torch.load( f"/kaggle/input/wp5-variabilite-runresult/{file}", map_location=torch.device("cpu"), ) for file in os.listdir("/kaggle/input/wp5-variabilite-runresult") ] ) # Filter dataset CONTEXT = torch.load("/kaggle/input/data-wp5-cifar10-context/context.pt") INDEX = [id for _, _, id in CONTEXT.train_dataset] LABEL = [lb for _, lb, _ in CONTEXT.train_dataset] DATA = DATA[:, INDEX] # Compute dataset relief FORGETSCORE_MAX = int(torch.max(DATA)) FORGETSCORE_MIN = 0 HISTOGRAMS = torch.transpose( torch.stack( [ torch.sum(DATA == i, axis=0) for i in range(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, 1) ] ), 1, 0, ) # Ordering for better render Z = HISTOGRAMS.detach().numpy() F = np.argsort(np.argmax(Z, axis=1)) Z = Z[F] L = np.array(LABEL)[F] # Prepare surface x_dim, y_dim = HISTOGRAMS.shape X = np.arange(x_dim) Y = np.arange(y_dim) X, Y = np.meshgrid(Y, X) # Render XXX = np.arange(40 + 1) / 40 FIG, AX = plt.subplots(1, figsize=(20, 0.4)) AX.scatter(XXX, np.ones(41), c=cm.coolwarm(XXX), s=200) AX.set_xticks(XXX, np.arange(41)) AX.set_yticks([]) plt.show() # ### Pure fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(10, 10)) ax.plot_surface(X, Y, Z, cmap=cm.coolwarm) ax.set_xlabel("Input ForgetScore") ax.set_ylabel("Input ") ax.set_zlabel("Run count") ax.set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=2)) ax.set_yticks([]) ax.set_zticks([0, 20, 40]) ax.view_init(elev=90, azim=180) fig.show() plt.show() # #### Pure proj fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(10, 10)) ax.plot_surface(X, Y, Z, cmap=cm.coolwarm) ax.set_xlabel("Input ForgetScore") ax.set_ylabel("Input ") ax.set_zlabel("Run count") ax.set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=2)) ax.set_yticks([]) ax.set_zticks([]) ax.set_proj_type("ortho") ax.view_init(elev=90, azim=180) fig.show() plt.show() # #### Group split fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(10, 10)) ax.plot_surface(X, Y, Z, cmap=cm.coolwarm) TMP_MAX_HEIGHT = np.max(Z) TMP_GRP_SIZE = [12529, 9655, 8205, 9655] TMP_THICKNESS = 140 for i in range(3): ax.plot_surface( np.array([[FORGETSCORE_MIN - 0.5, FORGETSCORE_MAX + 0.5]] * 2), np.array( [ [sum(TMP_GRP_SIZE[: i + 1]) + (TMP_THICKNESS / 2)] * 2, [sum(TMP_GRP_SIZE[: i + 1]) - (TMP_THICKNESS / 2)] * 2, ] ), np.array([[TMP_MAX_HEIGHT + 1] * 2, [TMP_MAX_HEIGHT + 2] * 2]), color="k", ) for i in range(4): ax.text( FORGETSCORE_MAX + 0.5, sum(TMP_GRP_SIZE[:i]) + TMP_GRP_SIZE[i] / 2, 60, s=f"G{i+1}", horizontalalignment="center", verticalalignment="center", fontsize=8, ) ax.set_proj_type("ortho") # FOV = 90 deg ax.set_xlabel("Input ForgetScore") ax.set_ylabel("Input ") ax.set_zlabel("Run count") ax.set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=2)) ax.set_yticks([]) 
ax.set_zticks([]) ax.set_xlim(0, 20) ax.view_init(elev=90, azim=180) fig.show() plt.show() # ### 3D fig, ax = plt.subplots(2, subplot_kw={"projection": "3d"}, figsize=(10, 10)) for i in range(2): ax[i].plot_surface(X, Y, Z, cmap=cm.coolwarm) ax[i].set_xlabel("Input ForgetScore") ax[i].set_ylabel("Input ") ax[i].set_zlabel("Run count") ax[i].set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=2)) ax[i].set_yticks([]) ax[i].set_zticks([0, 20, 40]) ax[i].view_init(elev=45, azim=45 * (i - 0.5) * 2) fig.show() plt.show() # ### 3D Groups fig, ax = plt.subplots(2, subplot_kw={"projection": "3d"}, figsize=(10, 10)) TMP_MAX_HEIGHT = np.max(Z) TMP_GRP_SIZE = [12529, 9655, 8205, 9655] TMP_THICKNESS = 140 # for i in range(4): # ax.text( FORGETSCORE_MAX+.5 , sum(TMP_GRP_SIZE[:i]) + TMP_GRP_SIZE[i]/2 , 60 , s=f"G{i+1}" , horizontalalignment='center' , verticalalignment='center' , fontsize = 8 ) for i in range(2): ax[i].plot_surface(X, Y, Z, cmap=cm.coolwarm) ax[i].set_xlabel("Input ForgetScore") ax[i].set_ylabel("Input ") ax[i].set_zlabel("Run count") ax[i].set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=4)) ax[i].set_yticks([]) ax[i].set_zticks([0, 20, 40]) ax[i].view_init(elev=30, azim=10 * (i - 0.5) * 2) for j in range(3): ax[i].plot_surface( np.array([[FORGETSCORE_MIN - 0.5, FORGETSCORE_MAX + 0.5]] * 2), np.array( [ [sum(TMP_GRP_SIZE[: j + 1]) + (TMP_THICKNESS / 2)] * 2, [sum(TMP_GRP_SIZE[: j + 1]) - (TMP_THICKNESS / 2)] * 2, ] ), np.array([[-5] * 2, [30] * 2]), color="#50505050", ) fig.show() plt.show() # ### Information condensé fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(7, 7)) ax.plot_surface(X, Y, Z, cmap=cm.coolwarm) [ ax.scatter(FORGETSCORE_MAX + 1 + l / 2, i, -0.1, c=cm.tab10(l), s=0.04) for i, l in enumerate(L) ] ax.set_xlabel("Input ForgetScore") ax.set_ylabel("Input ") ax.set_zlabel("Run count") ax.set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=2)) ax.set_yticks([]) ax.set_zticks([0, 20, 40]) ax.view_init(elev=90, azim=180) fig.show() plt.show()
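The HISTOGRAMS tensor above counts, for every training example, in how many of the 40 runs it reached each forget score. Since the run files live in a private Kaggle dataset, here is a self-contained sketch of the same computation on synthetic data with toy sizes:

```python
# Minimal, self-contained sketch of the HISTOGRAMS construction: rows of
# `data` are runs, columns are training examples, and the result counts how
# many runs gave each example each possible forget score. Sizes are toy values.
import torch

n_runs, n_examples, max_score = 5, 8, 4
data = torch.randint(0, max_score + 1, (n_runs, n_examples))
hist = torch.stack([(data == s).sum(dim=0) for s in range(max_score + 1)]).T
print(hist.shape)                              # (n_examples, max_score + 1)
assert torch.all(hist.sum(dim=1) == n_runs)    # every example is counted once per run
```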
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/250/129250681.ipynb
wp5-variabilite-runresult
mathieutrohm
[{"Id": 129250681, "ScriptId": 37247089, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14206278, "CreationDate": "05/12/2023 07:25:21", "VersionNumber": 6.0, "Title": "\ud83e\uddd1\u200d\ud83c\udfeb Demo Dataset : wp5-variabilite-runresult", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 193.0, "LinesInsertedFromPrevious": 3.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 190.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185129972, "KernelVersionId": 129250681, "SourceDatasetVersionId": 5260254}, {"Id": 185129973, "KernelVersionId": 129250681, "SourceDatasetVersionId": 5404860}]
[{"Id": 5260254, "DatasetId": 3056826, "DatasourceVersionId": 5333124, "CreatorUserId": 14206278, "LicenseName": "Other (specified in description)", "CreationDate": "03/29/2023 07:15:36", "VersionNumber": 2.0, "Title": "DATA WP5 Variabilite RunResults", "Slug": "wp5-variabilite-runresult", "Subtitle": "40 training, with ForgetScore, same config, different seeds", "Description": "Each file of this dataset correspond to a single run of a training process and contains the related forgetscore computed during this process.\n\nForgetscore's after training with :\n- ` 50 ` epochs\n- ` 1e-4` learning rate\n- ` 1024 ` batch size \n- ` resNet ` based model\n\n[Research paper](https://arxiv.org/pdf/1812.05159.pdf) on the topic\n\nUsed to show stability and reliability of the dataset", "VersionNotes": "Data Update 2023/03/29", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3056826, "CreatorUserId": 14206278, "OwnerUserId": 14206278.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5260254.0, "CurrentDatasourceVersionId": 5333124.0, "ForumId": 3119437, "Type": 2, "CreationDate": "03/28/2023 07:51:13", "LastActivityDate": "03/28/2023", "TotalViews": 102, "TotalDownloads": 5, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 14206278, "UserName": "mathieutrohm", "DisplayName": "Mathieu ~ Trohm", "RegisterDate": "03/17/2023", "PerformanceTier": 0}]
import os import re import sys import torch import numpy as np import matplotlib.cm as cm import matplotlib.pyplot as plt # Add to path the current lib sys.path.insert(1, "/kaggle/input/data-wp5-cifar10-context") # Load dataset DATA = torch.stack( [ torch.load( f"/kaggle/input/wp5-variabilite-runresult/{file}", map_location=torch.device("cpu"), ) for file in os.listdir("/kaggle/input/wp5-variabilite-runresult") ] ) # Filter dataset CONTEXT = torch.load("/kaggle/input/data-wp5-cifar10-context/context.pt") INDEX = [id for _, _, id in CONTEXT.train_dataset] LABEL = [lb for _, lb, _ in CONTEXT.train_dataset] DATA = DATA[:, INDEX] # Compute dataset relief FORGETSCORE_MAX = int(torch.max(DATA)) FORGETSCORE_MIN = 0 HISTOGRAMS = torch.transpose( torch.stack( [ torch.sum(DATA == i, axis=0) for i in range(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, 1) ] ), 1, 0, ) # Ordering for better render Z = HISTOGRAMS.detach().numpy() F = np.argsort(np.argmax(Z, axis=1)) Z = Z[F] L = np.array(LABEL)[F] # Prepare surface x_dim, y_dim = HISTOGRAMS.shape X = np.arange(x_dim) Y = np.arange(y_dim) X, Y = np.meshgrid(Y, X) # Render XXX = np.arange(40 + 1) / 40 FIG, AX = plt.subplots(1, figsize=(20, 0.4)) AX.scatter(XXX, np.ones(41), c=cm.coolwarm(XXX), s=200) AX.set_xticks(XXX, np.arange(41)) AX.set_yticks([]) plt.show() # ### Pure fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(10, 10)) ax.plot_surface(X, Y, Z, cmap=cm.coolwarm) ax.set_xlabel("Input ForgetScore") ax.set_ylabel("Input ") ax.set_zlabel("Run count") ax.set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=2)) ax.set_yticks([]) ax.set_zticks([0, 20, 40]) ax.view_init(elev=90, azim=180) fig.show() plt.show() # #### Pure proj fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(10, 10)) ax.plot_surface(X, Y, Z, cmap=cm.coolwarm) ax.set_xlabel("Input ForgetScore") ax.set_ylabel("Input ") ax.set_zlabel("Run count") ax.set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=2)) ax.set_yticks([]) ax.set_zticks([]) ax.set_proj_type("ortho") ax.view_init(elev=90, azim=180) fig.show() plt.show() # #### Group split fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(10, 10)) ax.plot_surface(X, Y, Z, cmap=cm.coolwarm) TMP_MAX_HEIGHT = np.max(Z) TMP_GRP_SIZE = [12529, 9655, 8205, 9655] TMP_THICKNESS = 140 for i in range(3): ax.plot_surface( np.array([[FORGETSCORE_MIN - 0.5, FORGETSCORE_MAX + 0.5]] * 2), np.array( [ [sum(TMP_GRP_SIZE[: i + 1]) + (TMP_THICKNESS / 2)] * 2, [sum(TMP_GRP_SIZE[: i + 1]) - (TMP_THICKNESS / 2)] * 2, ] ), np.array([[TMP_MAX_HEIGHT + 1] * 2, [TMP_MAX_HEIGHT + 2] * 2]), color="k", ) for i in range(4): ax.text( FORGETSCORE_MAX + 0.5, sum(TMP_GRP_SIZE[:i]) + TMP_GRP_SIZE[i] / 2, 60, s=f"G{i+1}", horizontalalignment="center", verticalalignment="center", fontsize=8, ) ax.set_proj_type("ortho") # FOV = 90 deg ax.set_xlabel("Input ForgetScore") ax.set_ylabel("Input ") ax.set_zlabel("Run count") ax.set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=2)) ax.set_yticks([]) ax.set_zticks([]) ax.set_xlim(0, 20) ax.view_init(elev=90, azim=180) fig.show() plt.show() # ### 3D fig, ax = plt.subplots(2, subplot_kw={"projection": "3d"}, figsize=(10, 10)) for i in range(2): ax[i].plot_surface(X, Y, Z, cmap=cm.coolwarm) ax[i].set_xlabel("Input ForgetScore") ax[i].set_ylabel("Input ") ax[i].set_zlabel("Run count") ax[i].set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=2)) ax[i].set_yticks([]) ax[i].set_zticks([0, 20, 40]) ax[i].view_init(elev=45, azim=45 * (i - 0.5) * 2) fig.show() 
plt.show() # ### 3D Groups fig, ax = plt.subplots(2, subplot_kw={"projection": "3d"}, figsize=(10, 10)) TMP_MAX_HEIGHT = np.max(Z) TMP_GRP_SIZE = [12529, 9655, 8205, 9655] TMP_THICKNESS = 140 # for i in range(4): # ax.text( FORGETSCORE_MAX+.5 , sum(TMP_GRP_SIZE[:i]) + TMP_GRP_SIZE[i]/2 , 60 , s=f"G{i+1}" , horizontalalignment='center' , verticalalignment='center' , fontsize = 8 ) for i in range(2): ax[i].plot_surface(X, Y, Z, cmap=cm.coolwarm) ax[i].set_xlabel("Input ForgetScore") ax[i].set_ylabel("Input ") ax[i].set_zlabel("Run count") ax[i].set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=4)) ax[i].set_yticks([]) ax[i].set_zticks([0, 20, 40]) ax[i].view_init(elev=30, azim=10 * (i - 0.5) * 2) for j in range(3): ax[i].plot_surface( np.array([[FORGETSCORE_MIN - 0.5, FORGETSCORE_MAX + 0.5]] * 2), np.array( [ [sum(TMP_GRP_SIZE[: j + 1]) + (TMP_THICKNESS / 2)] * 2, [sum(TMP_GRP_SIZE[: j + 1]) - (TMP_THICKNESS / 2)] * 2, ] ), np.array([[-5] * 2, [30] * 2]), color="#50505050", ) fig.show() plt.show() # ### Condensed information fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(7, 7)) ax.plot_surface(X, Y, Z, cmap=cm.coolwarm) [ ax.scatter(FORGETSCORE_MAX + 1 + l / 2, i, -0.1, c=cm.tab10(l), s=0.04) for i, l in enumerate(L) ] ax.set_xlabel("Input ForgetScore") ax.set_ylabel("Input ") ax.set_zlabel("Run count") ax.set_xticks(np.arange(FORGETSCORE_MIN, FORGETSCORE_MAX + 1, step=2)) ax.set_yticks([]) ax.set_zticks([0, 20, 40]) ax.view_init(elev=90, azim=180) fig.show() plt.show()
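# A quick sanity check that clarifies the shape conventions used above: DATA is (runs, inputs) and HISTOGRAMS is
# (inputs, forget-score bins), so every row of HISTOGRAMS should sum to the number of runs. This is only an
# illustrative sketch, assuming the stored forget scores are integers in [FORGETSCORE_MIN, FORGETSCORE_MAX];
# it uses no names beyond those already defined in this notebook.
n_runs, n_inputs = DATA.shape
assert HISTOGRAMS.shape[0] == n_inputs and HISTOGRAMS.shape[1] == FORGETSCORE_MAX - FORGETSCORE_MIN + 1
row_sums = HISTOGRAMS.sum(dim=1)  # one count per input, summed over all forget-score bins
assert bool((row_sums == n_runs).all()), "each run contributes exactly one forget score per input"
print(f"{n_runs} runs x {n_inputs} inputs, forget scores in [{FORGETSCORE_MIN}, {FORGETSCORE_MAX}]")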
false
0
2,299
0
2,448
2,299
129357290
# ## Table of Contents # + [Intoduction](#Introduction:) # + [Data Description](#Data-Description) # + [Features Unit Description](#Features-Unit-Description) # # + [Importing Libraries & Data](#Importing-Libraries-&-Data) # + [Exploratory Data Analysis(EDA)](#Exploratory-Data-Analysis) # + [Breif analysis about the train by using the profileReport](#Breif-analysis-about-the-train-by-using-the-profileReport) # + [Droping the unwanted columns](#Droping-the-unwanted-columns) # + [Discription of train_df & original_df](#Discription-of-train_df-&-original_df) # + [Data visualization](#Data-visualization) # + [Displaying the Yield of the train_df & original_df](#Displaying-the-Yield-of-the-train_df-&-original_df) # + [Feature Correlation of train_df & original_df ](#Feature-Correlation-of-train_df-&-original_df) # + [Visualizing the data in box plot](#Visualizing-the-data-in-box-plot) # + [Removing the outliers](#Removing-the-outliers) # + [Droping the Highly correlated columns](#Droping-the-Highly-correlated-columns) # + [Finding the no.of null and duplicates](#Finding-the-no.of-null-and-duplicates) # + [Droping the Duplicates](#Droping-the-Duplicates) # + [Baseline Model](#Baseline-Model) # + [Predcting the values with the test_df](#Predcting-the-values-with-the-test_df) # + [Creating the Submission file](#Creating-the-Submission-file) # + [Why I used ipywidgets](#Why-I-used-ipywidgets-?) # ## Introduction: from IPython.display import Image Image("/kaggle/input/blueberry/blueberry.jpeg") # #### Data Description # Blueberries: Exploring Key Features and Their Impact # Blueberries are not just delicious fruits; they also possess several fascinating features that contribute to their growth, development, and overall value. In this essay, we will delve into the various features associated with blueberries and understand their significance. # Clone Size: # The clone size refers to the number of genetically identical blueberry plants that originate from a single parent plant. Blueberries are commonly propagated through cloning to ensure consistency in traits, such as fruit size, flavor, and disease resistance. Large clone sizes allow for the production of uniform blueberries that meet specific market demands. # Pollinators: # Blueberries rely on various pollinators, including honeybees, bumblebees, and native bees like Andrena and Osmia species. These pollinators play a vital role in transferring pollen from the male flower parts to the female flower parts, enabling fertilization and fruit formation. The presence of a diverse and abundant pollinator population ensures successful pollination and optimal blueberry production. # Temperature Ranges: # Blueberries exhibit preferences for specific temperature ranges during their growth stages. The maximum and minimum upper temperature ranges indicate the upper limits and lower limits, respectively, of temperatures that blueberry plants can tolerate without detrimental effects. These temperature ranges are essential for determining suitable growing conditions and managing blueberry crops in different regions. # Raining Days and Average Raining Days: # Blueberries require an adequate water supply to thrive, making rainfall an important factor. The column "Raining Days" represents the number of days on which rain occurs, while "Average Raining Days" provides an average count over a specific period. Sufficient rainfall ensures proper hydration for blueberry plants, supports nutrient uptake, and contributes to overall plant health and productivity. 
# Fruit Set, Fruit Mass, Seeds, and Yield: # These features are directly related to the blueberry fruit itself and its productivity. "Fruit set" refers to the process of fruit development and the successful formation of berries. It indicates the percentage of flowers that progress to become mature fruits. "Fruit mass" represents the weight of individual blueberries and is an essential factor in determining market value and consumer preference. The "seeds" column indicates the number of seeds present within each blueberry, contributing to its nutritional composition and potential for propagation. Lastly, "yield" refers to the total quantity of blueberries harvested from a given area or number of plants, providing an important measure of productivity and economic value. # Understanding and managing these features are crucial for blueberry growers and researchers. By optimizing clone size, ensuring a diverse pollinator population, maintaining suitable temperature ranges, monitoring rainfall patterns, and enhancing fruit set, fruit mass, seed count, and yield, growers can maximize blueberry production and meet market demands. # In conclusion, blueberries possess a range of features that influence their growth, quality, and productivity. The interplay of clone size, pollinators, temperature ranges, rainfall, and fruit characteristics all contribute to the successful cultivation and utilization of blueberries. Whether it's the management of genetic diversity, the importance of pollinators, or the impact of environmental factors, each feature plays a significant role in the fascinating world of blueberries. # #### Features Unit Description # + Clonesize(m2): The average blueberry clone size in the field. # + Honeybee (bees/m2/min): Honeybee density in the field. # + Bumbles (bees/m2/min): Bumblebee density in the field. # + Andrena (bees/m2/min): Andrena bee density in the field. # + Osmia (bees/m2/min): Osmia bee density in the field. # + MaxOfUpperTRange (℃): The highest record of the upper band daily air temperature during the bloom season. # + MinOfUpperTRange (℃): The lowest record of the upper band daily air temperature7. # + AverageOfUpperTRange (℃): The average of the upper band daily air temperature. # + MaxOfLowerTRange (℃): The highest record of the lower band daily air temperature. # + MinOfLowerTRange (℃): The lowest record of the lower band daily air temperature. # + AverageOfLowerTRange (℃): The average of the lower band daily air temperature. # + RainingDays: Day The total number of days during the bloom season, each of which has precipitation larger than zero. # + AverageRainingDays: Day The average of raining days of the entire bloom season. 
# ### Importing Libraries & Data import numpy as np import pandas as pd import ydata_profiling import ipywidgets as widgets from IPython.display import display, Image import matplotlib.pyplot as plt import seaborn as sns import plotly.graph_objects as go import plotly.express as px from plotly.subplots import make_subplots import prettytable from prettytable import PrettyTable import xgboost as xgb from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.model_selection import cross_val_score from sklearn.metrics import mean_squared_error import warnings warnings.filterwarnings("ignore") train_df = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv") test_df = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv") original_df = pd.read_csv( "/kaggle/input/original-df/WildBlueberryPollinationSimulationData.csv" ) # ### Exploratory Data Analysis # #### Breif analysis about the train by using the profileReport ydata_profiling.ProfileReport(train_df) # #### Droping the unwanted columns # ^-^ Droping the id columns from train_df ^-^ train_df.drop("id", axis=1, inplace=True) # ^-^ Droping the id column from the test_df ^-^ test_df.drop("id", axis=1, inplace=True) # ^_^ Droping the ROW# from the original_df ^-^ original_df.drop(["Row#"], axis=1, inplace=True) # #### Discription of train_df & original_df class DescriptiveStatsAnalyzer: def __init__(self, original_df, train_df): self.original_df = original_df self.train_df = train_df self.description = self.original_df.describe() self.description_train = self.train_df.describe() self.row_dropdown = widgets.Dropdown( options=self.description.index, description="Select Row:" ) self.dataf_dropdown = widgets.Dropdown( options=["Original_df", "Train_df"], description="DataFrame:" ) self.output = widgets.Output() self.row_dropdown.observe(self.on_dropdown_change, names="value") self.dataf_dropdown.observe(self.on_dropdown_change, names="value") def update_table(self, change): selected_row = self.row_dropdown.value selected_df = self.dataf_dropdown.value if selected_df == "Original_df": selected_description = self.description else: selected_description = self.description_train table = prettytable.PrettyTable() table.field_names = [selected_df, selected_row] for column in selected_description.columns: value = selected_description.loc[selected_row, column] table.add_row([column, value]) with self.output: self.output.clear_output() print(table.get_string()) def on_dropdown_change(self, change): self.update_table(change) def display_widgets(self): display(widgets.HBox([self.row_dropdown, self.dataf_dropdown])) display(self.output) # ^-^ Here by using the dropdown we can select the required rows from the description table and at a time we can select the data set we want... 
^-^ descriptive_stats_analyzer = DescriptiveStatsAnalyzer(original_df, train_df) descriptive_stats_analyzer.display_widgets() # #### Data visualization def plot_distribution( train_df, original_df, target_col="yield", n_cols=4, figsize=(1000, 900), bar_size=0.2, ): if "id" in train_df.columns: train_df = train_df.drop(columns=["id"]) if "id" in original_df.columns: original_df = original_df.drop(columns=["id"]) num_cols = len(train_df.columns) - 1 # Exclude the target column num_rows = (num_cols + n_cols - 1) // n_cols fig = make_subplots(rows=num_rows, cols=n_cols) col_idx = 0 for row in range(1, num_rows + 1): for col in range(1, n_cols + 1): if col_idx >= num_cols: break column_name = train_df.columns[col_idx] if column_name != target_col: fig.add_trace( go.Histogram( x=train_df[column_name], histnorm="probability", name="Train", xbins=dict(size=bar_size), ), row=row, col=col, ) fig.add_trace( go.Histogram( x=original_df[column_name], histnorm="probability", name="Original", xbins=dict(size=bar_size), ), row=row, col=col, ) fig.update_xaxes(title_text=column_name, row=row, col=col) fig.update_yaxes(title_text=" ", row=row, col=col) col_idx += 1 fig.update_layout( height=figsize[0], width=figsize[1], title_text="Histograms Comparison" ) fig.show() # Call the function with your dataframes plot_distribution(train_df, original_df, target_col="yield", n_cols=4) # #### Displaying the Yield of the train_df & original_df from tqdm import tqdm # Define CSS with the necessary values CSS = { "ftre_plots_req": "Y", "target": "yield", # Replace 'yield' with the actual target column name "title_specs": {"font": {"size": 16}}, # Adjust the title specifications as needed } if CSS["ftre_plots_req"] == "Y": fig = make_subplots( rows=1, cols=2, subplot_titles=("Train data - target", "Original data - target"), horizontal_spacing=0.25, ) for i, df in tqdm(enumerate([train_df, original_df]), "Target plot ---"): row = 1 col = i + 1 df_name = "Train" if i == 0 else "Original" fig.add_trace( go.Histogram(x=df[CSS["target"]], marker_color="orange", nbinsx=50), row=row, col=col, ) fig.update_xaxes(title_text=CSS["target"], row=row, col=col) fig.update_yaxes(title_text="Count", row=row, col=col) fig.update_layout( showlegend=False, title=f"\n{df_name} data - target\n", **CSS["title_specs"] ) fig.update_layout(height=400, width=880) fig.show() print() # #### Feature Correlation of train_df & original_df class CorrelationAnalyzer: def __init__(self, train_df, original_df): self.train_df = train_df self.original_df = original_df self.train_corr = self.train_df.corr() self.original_corr = self.original_df.corr() self.df_dropdown = widgets.Dropdown( options=["Train", "Original"], description="DataFrame:" ) self.output = widgets.Output() self.df_dropdown.observe(self.on_dropdown_change, names="value") def update_heatmap(self, change): selected_df = self.df_dropdown.value if selected_df == "Train": corr = self.train_corr cmap = "viridis" title = "Feature Correlation for train_df" else: corr = self.original_corr cmap = "YlOrRd" title = "Feature Correlation for original_df" with self.output: self.output.clear_output() plt.figure(figsize=(18, 12)) sns.heatmap(np.abs(corr), cmap=cmap, annot=True) plt.title(title) plt.show() def on_dropdown_change(self, change): self.update_heatmap(change) def display_widgets(self): display(self.df_dropdown) display(self.output) # ^-^ By using the drop down we can select the correlation matrix we want ^-^ correlation_analyzer = CorrelationAnalyzer(train_df, original_df) 
correlation_analyzer.display_widgets() # #### Visualizing the data in box plot class BoxPlotVisualizer: def __init__(self, train_df, original_df): self.train_df = train_df self.original_df = original_df self.dropdown = None self.output = None def plot_boxplots(self, dataset): plt.style.use("bmh") fig, axs = plt.subplots(4, 4, figsize=(16, 14)) data_columns = [ "honeybee", "bumbles", "andrena", "osmia", "MaxOfUpperTRange", "MinOfUpperTRange", "AverageOfUpperTRange", "MaxOfLowerTRange", "MinOfLowerTRange", "AverageOfLowerTRange", "RainingDays", "AverageRainingDays", "fruitset", "fruitmass", "seeds", "yield", ] if dataset == "Train": df = self.train_df box_color = "yellow" elif dataset == "Original": df = self.original_df box_color = "red" for i, column in enumerate(data_columns): row = i // 4 col = i % 4 bp = axs[row, col].boxplot(df[column], vert=False, patch_artist=True) axs[row, col].set_title(column) # Set box colors for box in bp["boxes"]: box.set(color="black", facecolor=box_color) for whisker in bp["whiskers"]: whisker.set(color="black") for cap in bp["caps"]: cap.set(color="black") for median in bp["medians"]: median.set(color="black") for flier in bp["fliers"]: flier.set( marker="o", markerfacecolor="green", markersize=8, linestyle="none" ) fig.tight_layout() plt.show() def dropdown_change(self, change): with self.output: self.output.clear_output() self.plot_boxplots(change.new) def display_widgets(self): # Create the dropdown widget self.dropdown = widgets.Dropdown( options=["Train", "Original"], value="Train", description="Dataset:" ) # Create the output widget self.output = widgets.Output() # Register the dropdown widget's observe function self.dropdown.observe(self.dropdown_change, names="value") # Display the dropdown and output widgets display(self.dropdown) display(self.output) train_data = train_df original_data = original_df # ^-^ Same as the above we are displaying the box plot by using the dropdown ^-^ visualizer = BoxPlotVisualizer(train_data, original_data) visualizer.display_widgets() # #### Removing the outliers # Calculate z-scores for each column train_z_scores = np.abs((train_df - train_df.mean()) / train_df.std()) original_z_scores = np.abs((original_df - original_df.mean()) / original_df.std()) # Set a threshold for the z-score above which values are considered outliers threshold = 3 # Remove rows with outliers for each column train_df = train_df[(train_z_scores < threshold).all(axis=1)] original_df = original_df[(original_z_scores < threshold).all(axis=1)] # #### Droping the Highly correlated columns # Define the columns to drop columns_to_drop = [ "MaxOfUpperTRange", "MinOfUpperTRange", "AverageOfUpperTRange", "MaxOfLowerTRange", "MinOfLowerTRange", "AverageOfLowerTRange", "RainingDays", "AverageRainingDays", ] # Drop the specified columns from train_df and original_df train_df = train_df.drop(columns_to_drop, axis=1) original_df = original_df.drop(columns_to_drop, axis=1) test_df = test_df.drop(columns_to_drop, axis=1) # #### Finding the no.of null and duplicates # null.sum() values in train_df and original_df train_null_sum = train_df.isnull().sum().sum() original_null_sum = original_df.isnull().sum().sum() # duplicate.sum() values in train_df and original_df train_duplicate_sum = train_df.duplicated().sum() original_duplicate_sum = original_df.duplicated().sum() # Displaying results in a pretty table table = PrettyTable() table.field_names = ["Data Frame", "Null.Sum()", "Duplicate.Sum()"] table.add_row(["train_df", train_null_sum, train_duplicate_sum]) 
table.add_row(["original_df", original_null_sum, original_duplicate_sum]) print(table) # #### Dropping the Duplicates train_df.drop_duplicates(inplace=True) train_df.shape # #### Baseline Model # Concatenate the two dataframes combined_df = pd.concat([train_df, original_df]) # Separate the features (X) and target variable (y) for training X = combined_df.drop(columns=["yield"]) y = combined_df["yield"] # Split the data into training and testing sets X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) # Create and train the XGBoost model using the training data xgb_model = xgb.XGBRegressor() xgb_model.fit(X_train, y_train) # Make predictions using the trained model xgb_preds = xgb_model.predict(X_test) # #### Predicting the values with the test_df # predict with the test dataset result = xgb_model.predict(test_df) result sample_submission = pd.read_csv( "/kaggle/input/playground-series-s3e14/sample_submission.csv" ) sample_submission # #### Creating the Submission file sample_submission["yield"] = result sample_submission # sample_submission.to_csv('submission_XGB.csv', index=False)
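# The baseline above makes hold-out predictions (xgb_preds) but never scores them, even though mean_squared_error is
# already imported earlier in the notebook. A minimal sketch of that missing evaluation step, using only variables
# defined above; RMSE and MAE are shown for illustration and are not a claim about the competition metric.
from sklearn.metrics import mean_absolute_error, mean_squared_error

baseline_rmse = mean_squared_error(y_test, xgb_preds, squared=False)  # root mean squared error on the 30% hold-out
baseline_mae = mean_absolute_error(y_test, xgb_preds)  # mean absolute error on the same split
print(f"Baseline XGBRegressor -> RMSE: {baseline_rmse:.2f} | MAE: {baseline_mae:.2f}")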
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/357/129357290.ipynb
null
null
[{"Id": 129357290, "ScriptId": 38436425, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10938688, "CreationDate": "05/13/2023 05:16:34", "VersionNumber": 4.0, "Title": "wild blue berry predctionS3E14", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 499.0, "LinesInsertedFromPrevious": 49.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 450.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 17}]
null
null
null
null
false
0
5,215
17
5,215
5,215
129320155
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings warnings.filterwarnings("ignore") import matplotlib.pyplot as plt import seaborn as sns # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # The first step in the Data Science methodology is to define the problem and formulate a clear research question. In this case, the problem is to create a predictive model that can predict which passengers survived the Titanic shipwreck based on a set of features such as age, gender, socio-economic class, etc. The research question might be: # "What features were most important in determining whether a passenger survived the Titanic shipwreck, and can we build a predictive model that accurately predicts survival based on these features?" # Read in the training data train_df = pd.read_csv("/kaggle/input/titanic/train.csv") # Read in the test data test_df = pd.read_csv("/kaggle/input/titanic/test.csv") train_df test_df # Explore the data: Look at the first few rows of the data, check for missing values, and get a summary of the data using train_df.info() and train_df.describe(). This will give you an idea of what the data looks like and what type of data cleaning/preprocessing may be needed. # Handle missing values: Check for missing values in the data using train_df.isnull().sum() and test_df.isnull().sum(). Decide how to handle missing values (e.g., impute with median/mode values, drop rows/columns with missing values, etc.) based on the amount of missing data and the importance of the missing feature. # Visualize the data: Use visualization tools like matplotlib and seaborn to explore relationships between variables and identify any patterns in the data. This can help you decide which features to include in your model and how to preprocess them. # Feature engineering: Create new features from existing ones if necessary. For example, you might extract the title from the "Name" column or create a "Family Size" feature by combining the "SibSp" and "Parch" columns. # Data preprocessing: Preprocess the data to get it ready for machine learning algorithms. This might include converting categorical variables to numerical values using one-hot encoding or label encoding, scaling numerical features, and normalizing the data. # Split the data: Split the training data into a training set and a validation set to evaluate the performance of your model. # Explore the data train_df.info() # This output shows us the following information: # There are 891 entries (rows) in the dataset. # There are 12 columns in the dataset. # The dataset has a RangeIndex, which means that the index goes from 0 to 890. # The columns are labeled with their names. # Some columns have missing values, indicated by the "Non-Null Count" values being less than 891. # There are three data types in the dataset: float64, int64, and object. # The memory usage of the dataset is 83.7+ KB. 
train_df.describe() # Based on the summary statistics for the numerical columns in the training dataset, we can see that: # - The mean survival rate of passengers is 0.38, indicating that the majority of passengers in the training set did not survive. # - The average age of passengers is 29.7 years, with a standard deviation of 14.5 years. # - The majority of passengers did not have siblings or spouses onboard, with a median of 0. # - The majority of passengers did not have parents or children onboard, with a median of 0. # - The average fare paid by passengers is 32.2, with a standard deviation of 49.7. # There are some missing values for the "Age", "Cabin" and "Embarked" columns, which will need to be dealt with before training a machine learning model. We will need to decide whether to drop rows with missing values or impute the missing values with some value, such as the mean or median. # We will also need to explore the categorical variables in the dataset, such as "Sex" and "Embarked", to understand how they relate to survival and whether they need to be encoded as numerical values for machine learning models. train_df.head() # Based on the output, we can see that the dataset contains 891 records with 12 columns. The columns in the dataset are as follows: # - PassengerId: The ID of each passenger. # - Survived: Whether the passenger survived (1) or not (0). # - Pclass: The class of travel of each passenger (1st, 2nd, or 3rd class). # - Name: The name of each passenger. # - Sex: The gender of each passenger. # - Age: The age of each passenger (some records are missing). # - SibSp: The number of siblings or spouses aboard the Titanic for each passenger. # - Parch: The number of parents or children aboard the Titanic for each passenger. # - Ticket: The ticket number for each passenger. # - Fare: The fare paid by each passenger. # - Cabin: The cabin number of each passenger (many records are missing). # - Embarked: The port of embarkation for each passenger (C = Cherbourg, Q = Queenstown, S = Southampton) (some records are missing). # There are missing values in the dataset, especially in the "Age" and "Cabin" columns. We may need to fill in or drop missing values as part of data preparation. Additionally, some columns like "Name" and "Ticket" may not be useful for analysis and can be dropped. Finally, we can also create new features like "FamilySize" based on "SibSp" and "Parch" columns to capture the size of the family traveling together. # Check for missing values train_df.isnull().sum() # # The output of the code shows the number of missing values in each column of the train_df DataFrame. As we can see, the Age, Cabin, and Embarked columns have missing values. # There are 177 missing values in the Age column, which could be important for our analysis as it may affect a passenger's chance of survival. There are also 687 missing values in the Cabin column, which is a large proportion of the data. The Embarked column has only 2 missing values, which is a relatively small number compared to the size of the dataset. # We will need to decide how to handle these missing values in our data preparation step. We may choose to impute missing values, drop columns with a large number of missing values, or drop rows with missing values depending on the context of the problem and the impact of missing data on our analysis. 
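# The paragraph above mentions imputing with median/mode values as one option. The sketch below shows what that
# alternative could look like (median for the numeric Age column, most frequent value for Embarked). It works on a
# copy named train_imputed, a hypothetical name, so the random-sampling approach used in the next cells is unaffected.
train_imputed = train_df.copy()
train_imputed["Age"] = train_imputed["Age"].fillna(train_df["Age"].median())
train_imputed["Embarked"] = train_imputed["Embarked"].fillna(train_df["Embarked"].mode()[0])
print(train_imputed[["Age", "Embarked"]].isnull().sum())  # both columns should now report zero missing values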
test_df.isnull().sum() # Let's update the age field with a representative sample of the data from test_df and train_df # Before updating the missing values in the Age column, we should consider the distribution of ages in our dataset. One approach to filling missing values is to use the mean or median value of the column, but this may not be appropriate if the distribution of ages is skewed or has outliers. # Here's an example code to replace the missing values in the Age column with a random sample from the available data: # Concatenate the train and test dataframes combined_df = pd.concat([train_df, test_df], ignore_index=True) # Replace missing values in the Age column with a random sample from available data age_mean = combined_df["Age"].mean() age_std = combined_df["Age"].std() age_null_count = combined_df["Age"].isnull().sum() # Generate random ages between mean-std and mean+std age_null_random_list = np.random.randint( age_mean - age_std, age_mean + age_std, size=age_null_count ) # Fill missing values with the random values generated combined_df.loc[np.isnan(combined_df["Age"]), "Age"] = age_null_random_list # Split the data back into train and test sets train_df = combined_df.iloc[: len(train_df), :] test_df = combined_df.iloc[len(train_df) :, :] # This code will replace missing values in the Age column with a random sample from the available data, based on the mean and standard deviation of the Age column. We concatenate the train_df and test_df dataframes to ensure that we are using the same distribution of ages in both sets. Then we generate random ages between the mean minus the standard deviation and the mean plus the standard deviation, and replace the missing values with these random values. Finally, we split the data back into train and test sets. # Note that this is just one approach to handling missing values in the Age column, and there are many other techniques that could be used depending on the context of the problem. # Looking for Outliers in Fare sns.boxplot(x=train_df["Fare"]) q1 = np.percentile(train_df["Fare"], 25) q3 = np.percentile(train_df["Fare"], 75) iqr = q3 - q1 upper_bound = q3 + 1.5 * iqr print("The upper bound for Fare is:", upper_bound) print( "Number of fares above the upper bound:", len(train_df[train_df["Fare"] > upper_bound]), ) train_df["CabinLetter"] = train_df["Cabin"].str[0] test_df["CabinLetter"] = test_df["Cabin"].str[0] # Create pivot table to show average Fare for each Cabin cabin_fare = train_df.pivot_table(index="Cabin", values="Fare", aggfunc=np.mean) cabin_fare sns.boxplot(x="CabinLetter", y="Fare", data=train_df) # Based on the code output, it looks like there are 116 fares in the train_df DataFrame that are above the upper bound of 65.6344. These fares may be considered potential outliers, and you can decide how to handle them based on your problem context. # One approach to handle outliers is to remove them from the dataset. However, it's important to note that removing outliers can also remove important information from the data. Another approach is to cap the outliers to the maximum value within a reasonable range, for example, you can cap fares above 100 or 200 to 100 or 200, respectively. # You may also want to further investigate these high fares to see if there are any patterns or explanations for them. For example, high fares may be associated with a particular ticket class or cabin location. Understanding these patterns can help you make informed decisions about how to handle outliers in your data. 
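# The capping idea mentioned above (truncating fares at some upper bound instead of dropping rows) can be written with
# pandas clip. This is a standalone illustration only: the 200 threshold comes from the discussion above, and the
# result is kept in a separate Series so none of the later cells are affected.
fare_capped = train_df["Fare"].clip(upper=200)  # fares above 200 are set to exactly 200
print(pd.concat([train_df["Fare"].describe(), fare_capped.rename("Fare_capped").describe()], axis=1))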
sns.boxplot(x="Pclass", y="Fare", data=train_df) high_fare = train_df[train_df["Fare"] > 200] high_fare # Well it seems the Fare Outliers are Mainly in Cabin B for First Class. # > Cabin B on the Titanic was a part of the first-class accommodations on the ship. It was located on the Promenade, Bridge, and Shelter decks and contained a number of luxurious cabins for the wealthiest passengers aboard the ship. # Visualize the data sns.countplot(x="Survived", data=train_df) sns.countplot(x="Sex", hue="Survived", data=train_df) sns.countplot(x="Pclass", hue="Survived", data=train_df) sns.histplot(x="Age", hue="Survived", data=train_df) sns.histplot(x="Age", hue="Survived", data=train_df) # Feature engineering train_df.loc[:, "Title"] = train_df["Name"].apply( lambda x: x.split(",")[1].split(".")[0].strip() ) train_df.loc[:, "FamilySize"] = train_df["SibSp"] + train_df["Parch"] + 1 test_df.loc[:, "Title"] = test_df["Name"].apply( lambda x: x.split(",")[1].split(".")[0].strip() ) test_df.loc[:, "FamilySize"] = test_df["SibSp"] + test_df["Parch"] + 1 # These lines of code create two new features in the train_df DataFrame: # Title: This feature extracts the title from the Name column, such as "Mr", "Mrs", "Miss", etc. The lambda function splits the name by comma, selects the second element (which contains the title), splits it by dot, and selects the first element (which is the title itself). The strip() method removes any extra spaces. # FamilySize: This feature calculates the total number of family members (including the passenger) by summing the SibSp (number of siblings/spouses) and Parch (number of parents/children) columns, and adding 1 to include the passenger themselves. # These features could potentially provide more information for the machine learning model to predict survival, as they capture some additional characteristics of the passengers beyond the original columns. # Data preprocessing train_df = pd.get_dummies(train_df, columns=["Sex", "Embarked"]) train_df = train_df.drop( ["PassengerId", "Name", "Cabin", "Ticket", "Title", "CabinLetter"], axis=1 ) train_df["Fare"] = train_df["Fare"].fillna(train_df["Fare"].median()) test_df = pd.get_dummies(test_df, columns=["Sex", "Embarked"]) test_df = test_df.drop( ["PassengerId", "Name", "Cabin", "Ticket", "Title", "CabinLetter"], axis=1 ) test_df["Fare"] = test_df["Fare"].fillna(test_df["Fare"].median()) # Now we have split the data into training and validation sets using the train_test_split function from the sklearn.model_selection module. # Here, X_train and y_train contain the features and target variables respectively for the training set, while X_valid and y_valid contain the features and target variables respectively for the validation set. # The test_size parameter specifies the proportion of data that should be split into the validation set, while the random_state parameter ensures reproducibility of the split. # It's always a good practice to split the data into training and validation sets to avoid overfitting. Now, you can move on to the next step, which is data preprocessing. # Split the data from sklearn.model_selection import train_test_split X_train, X_valid, y_train, y_valid = train_test_split( train_df.drop("Survived", axis=1), train_df["Survived"], test_size=0.2, random_state=42, ) # Great! You have split the data into training and validation sets using the train_test_split function from the sklearn.model_selection module. 
# Here, X_train and y_train contain the features and target variables respectively for the training set, while X_valid and y_valid contain the features and target variables respectively for the validation set. # The test_size parameter specifies the proportion of data that should be split into the validation set, while the random_state parameter ensures reproducibility of the split. # It's always a good practice to split the data into training and validation sets to avoid overfitting. Now, you can move on to the next step, which is data preprocessing. # Before we proceed to model selection, let's briefly discuss what machine learning models are suitable for this binary classification problem. # Since the task is to predict whether a passenger survived or not, we need a model that can classify an input as either 0 (did not survive) or 1 (survived). Some of the popular classification algorithms for this problem are: # - Logistic Regression # - K-Nearest Neighbors (KNN) # - Decision Trees # - Random Forest # - Support Vector Machines (SVM) # - Naive Bayes # - Neural Networks # You can start with simpler models like Logistic Regression and KNN and gradually move on to more complex models like Random Forest, SVM, and Neural Networks if needed. It's always a good practice to start with simpler models to establish a baseline performance. # Now, let's move on to model selection. To start with, let's train a Logistic Regression model and evaluate its performance on the validation set. # Logistic Regression # from sklearn.linear_model import LogisticRegression # create a logistic regression model lr_model = LogisticRegression() # train the model on the training set lr_model.fit(X_train, y_train) # predict on the validation set lr_preds = lr_model.predict(X_valid) # evaluate the performance of the model on the validation set from sklearn.metrics import accuracy_score lr_acc = accuracy_score(y_valid, lr_preds) print("Logistic Regression accuracy: {:.2f}%".format(lr_acc * 100)) # K-Nearest Neighbors (KNN) # from sklearn.neighbors import KNeighborsClassifier # create a KNN model knn_model = KNeighborsClassifier(n_neighbors=5) # train the model on the training set knn_model.fit(X_train, y_train) # predict on the validation set knn_preds = knn_model.predict(X_valid) # evaluate the performance of the model on the validation set knn_acc = accuracy_score(y_valid, knn_preds) print("KNN accuracy: {:.2f}%".format(knn_acc * 100)) # Decision Trees # from sklearn.tree import DecisionTreeClassifier # create a Decision Tree model dt_model = DecisionTreeClassifier(max_depth=3) # train the model on the training set dt_model.fit(X_train, y_train) # predict on the validation set dt_preds = dt_model.predict(X_valid) # evaluate the performance of the model on the validation set dt_acc = accuracy_score(y_valid, dt_preds) print("Decision Tree accuracy: {:.2f}%".format(dt_acc * 100)) # Random Forest # from sklearn.ensemble import RandomForestClassifier # create a Random Forest model rf_model = RandomForestClassifier(n_estimators=100, max_depth=5) # train the model on the training set rf_model.fit(X_train, y_train) # predict on the validation set rf_preds = rf_model.predict(X_valid) # evaluate the performance of the model on the validation set rf_acc = accuracy_score(y_valid, rf_preds) print("Random Forest accuracy: {:.2f}%".format(rf_acc * 100)) # Support Vector Machines (SVM) # from sklearn.svm import SVC # create a SVM model svm_model = SVC(kernel="rbf") # train the model on the training set svm_model.fit(X_train, y_train) 
# predict on the validation set svm_preds = svm_model.predict(X_valid) # evaluate the performance of the model on the validation set svm_acc = accuracy_score(y_valid, svm_preds) print("SVM accuracy: {:.2f}%".format(svm_acc * 100)) # Naive Bayes # from sklearn.naive_bayes import GaussianNB # create a Naive Bayes model nb_model = GaussianNB() # train the model on the training set nb_model.fit(X_train, y_train) # predict on the validation set nb_preds = nb_model.predict(X_valid) # evaluate the performance of the model on the validation set nb_acc = accuracy_score(y_valid, nb_preds) print("Naive Bayes accuracy: {:.2f}%".format(nb_acc * 100)) # Neural Networks # from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense # Build the model model = Sequential() model.add(Dense(64, input_dim=X_train.shape[1], activation="relu")) model.add(Dense(1, activation="sigmoid")) # Compile the model model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # Train the model history = model.fit( X_train, y_train, epochs=50, batch_size=32, validation_data=(X_valid, y_valid) ) # Evaluate the model on the held-out validation data (this notebook defines no labelled test set) val_loss, val_acc = model.evaluate(X_valid, y_valid) print("Validation accuracy:", val_acc) from tensorflow import keras from tensorflow.keras import layers # Define a slightly deeper architecture with dropout model = keras.Sequential( [ layers.Dense(64, activation="relu", input_shape=(X_train.shape[1],)), layers.Dropout(0.2), layers.Dense(32, activation="relu"), layers.Dropout(0.2), layers.Dense(1, activation="sigmoid"), ] ) # Compile the model model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]) # Train the model history = model.fit( X_train, y_train, epochs=50, batch_size=32, validation_data=(X_valid, y_valid) ) # Evaluate the model on the held-out validation data val_loss, val_acc = model.evaluate(X_valid, y_valid) print("Validation accuracy:", val_acc) from sklearn.model_selection import cross_validate from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.naive_bayes import GaussianNB from sklearn.neural_network import MLPClassifier from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score # Define the models models = [ ("Logistic Regression", LogisticRegression()), ("K-Nearest Neighbors", KNeighborsClassifier()), ("Decision Tree", DecisionTreeClassifier()), ("Random Forest", RandomForestClassifier()), ("Support Vector Machine", SVC()), ("Naive Bayes", GaussianNB()), ( "Neural Network", MLPClassifier( hidden_layer_sizes=(64,), activation="relu", solver="adam", max_iter=50 ), ), ] # Evaluate each model using cross-validation results = [] for name, model in models: scores = cross_validate( model, X_train, y_train, cv=5,
scoring=("accuracy", "precision", "recall", "f1") ) results.append((name, scores)) # Print the results for name, result in results: print(f"{name}:") print(f'Accuracy: {result["test_accuracy"].mean():.3f}') print(f'Precision: {result["test_precision"].mean():.3f}') print(f'Recall: {result["test_recall"].mean():.3f}') print(f'F1 score: {result["test_f1"].mean():.3f}\n') # Based on the evaluation results, it appears that the Random Forest model has the highest accuracy and F1 score. So, you may want to select the Random Forest model for further analysis and potentially use it for predicting the survival of passengers in the test dataset. However, it's always a good idea to consider other factors such as model complexity, interpretability, and business requirements before making a final decision. # First, we will import the necessary modules and initialize the model. Then, we will define the hyperparameter grid and use GridSearchCV to find the best hyperparameters for the model. Finally, we will fit the optimized model to the training data and evaluate its performance on the validation data. # Here's the code: from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier # Initialize the model rfc = RandomForestClassifier(random_state=42) # Define the hyperparameter grid param_grid = { "n_estimators": [50, 100, 200], "max_depth": [5, 10, 15, 20, None], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 4], } # Perform Grid Search to find the best hyperparameters grid_search = GridSearchCV(rfc, param_grid=param_grid, cv=5, n_jobs=-1) grid_search.fit(X_train, y_train) # Print the best hyperparameters print("Best Hyperparameters:", grid_search.best_params_) # Fit the optimized model to the training data rfc = RandomForestClassifier(**grid_search.best_params_, random_state=42) rfc.fit(X_train, y_train) # Evaluate the model on the validation data y_pred = rfc.predict(X_valid) accuracy = accuracy_score(y_valid, y_pred) precision = precision_score(y_valid, y_pred) recall = recall_score(y_valid, y_pred) f1 = f1_score(y_valid, y_pred) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1) # Re-read the raw training data and read in the real test data so the submission is generated for the unseen test set train_df = pd.read_csv("/kaggle/input/titanic/train.csv") test_df = pd.read_csv("/kaggle/input/titanic/test.csv") y = train_df["Survived"] features = ["Pclass", "Sex", "SibSp", "Parch"] X = pd.get_dummies(train_df[features]) X_test = pd.get_dummies(test_df[features]) rfc.fit(X, y) predictions = rfc.predict(X_test) output = pd.DataFrame({"PassengerId": test_df.PassengerId, "Survived": predictions}) output.to_csv("submission.csv", index=False) print("Your submission was successfully saved!") output
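# The note above mentions interpretability as one factor in choosing a model. A minimal sketch of how the
# fitted random forest's feature importances could be inspected (rfc and X are the objects defined just above);
# the horizontal-bar plot is an illustrative choice, not part of the original analysis.
importances = pd.Series(rfc.feature_importances_, index=X.columns).sort_values(ascending=False)
print(importances)
importances.plot.barh(title="Random forest feature importances")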
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/320/129320155.ipynb
null
null
[{"Id": 129320155, "ScriptId": 38445810, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2428732, "CreationDate": "05/12/2023 18:36:18", "VersionNumber": 3.0, "Title": "Titanic Survival Model", "EvaluationDate": "05/12/2023", "IsChange": false, "TotalLines": 526.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 526.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings warnings.filterwarnings("ignore") import matplotlib.pyplot as plt import seaborn as sns # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # The first step in the Data Science methodology is to define the problem and formulate a clear research question. In this case, the problem is to create a predictive model that can predict which passengers survived the Titanic shipwreck based on a set of features such as age, gender, socio-economic class, etc. The research question might be: # "What features were most important in determining whether a passenger survived the Titanic shipwreck, and can we build a predictive model that accurately predicts survival based on these features?" # Read in the training data train_df = pd.read_csv("/kaggle/input/titanic/train.csv") # Read in the test data test_df = pd.read_csv("/kaggle/input/titanic/test.csv") train_df test_df # Explore the data: Look at the first few rows of the data, check for missing values, and get a summary of the data using train_df.info() and train_df.describe(). This will give you an idea of what the data looks like and what type of data cleaning/preprocessing may be needed. # Handle missing values: Check for missing values in the data using train_df.isnull().sum() and test_df.isnull().sum(). Decide how to handle missing values (e.g., impute with median/mode values, drop rows/columns with missing values, etc.) based on the amount of missing data and the importance of the missing feature. # Visualize the data: Use visualization tools like matplotlib and seaborn to explore relationships between variables and identify any patterns in the data. This can help you decide which features to include in your model and how to preprocess them. # Feature engineering: Create new features from existing ones if necessary. For example, you might extract the title from the "Name" column or create a "Family Size" feature by combining the "SibSp" and "Parch" columns. # Data preprocessing: Preprocess the data to get it ready for machine learning algorithms. This might include converting categorical variables to numerical values using one-hot encoding or label encoding, scaling numerical features, and normalizing the data. # Split the data: Split the training data into a training set and a validation set to evaluate the performance of your model. # Explore the data train_df.info() # This output shows us the following information: # There are 891 entries (rows) in the dataset. # There are 12 columns in the dataset. # The dataset has a RangeIndex, which means that the index goes from 0 to 890. # The columns are labeled with their names. # Some columns have missing values, indicated by the "Non-Null Count" values being less than 891. # There are three data types in the dataset: float64, int64, and object. # The memory usage of the dataset is 83.7+ KB. 
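# The info() summary above already hints at which columns have gaps. A small illustrative sketch that turns
# this into an explicit missing-value table (counts and percentages) to guide the impute-vs-drop decisions
# discussed below; the 10-row head is an arbitrary display choice.
missing_summary = pd.DataFrame(
    {
        "missing_count": train_df.isnull().sum(),
        "missing_pct": (train_df.isnull().mean() * 100).round(1),
    }
).sort_values("missing_pct", ascending=False)
print(missing_summary.head(10))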
train_df.describe() # Based on the summary statistics for the numerical columns in the training dataset, we can see that: # - The mean survival rate of passengers is 0.38, indicating that the majority of passengers in the training set did not survive. # - The average age of passengers is 29.7 years, with a standard deviation of 14.5 years. # - The majority of passengers did not have siblings or spouses onboard, with a median of 0. # - The majority of passengers did not have parents or children onboard, with a median of 0. # - The average fare paid by passengers is 32.2, with a standard deviation of 49.7. # There are some missing values for the "Age", "Cabin" and "Embarked" columns, which will need to be dealt with before training a machine learning model. We will need to decide whether to drop rows with missing values or impute the missing values with some value, such as the mean or median. # We will also need to explore the categorical variables in the dataset, such as "Sex" and "Embarked", to understand how they relate to survival and whether they need to be encoded as numerical values for machine learning models. train_df.head() # Based on the output, we can see that the dataset contains 891 records with 12 columns. The columns in the dataset are as follows: # - PassengerId: The ID of each passenger. # - Survived: Whether the passenger survived (1) or not (0). # - Pclass: The class of travel of each passenger (1st, 2nd, or 3rd class). # - Name: The name of each passenger. # - Sex: The gender of each passenger. # - Age: The age of each passenger (some records are missing). # - SibSp: The number of siblings or spouses aboard the Titanic for each passenger. # - Parch: The number of parents or children aboard the Titanic for each passenger. # - Ticket: The ticket number for each passenger. # - Fare: The fare paid by each passenger. # - Cabin: The cabin number of each passenger (many records are missing). # - Embarked: The port of embarkation for each passenger (C = Cherbourg, Q = Queenstown, S = Southampton) (some records are missing). # There are missing values in the dataset, especially in the "Age" and "Cabin" columns. We may need to fill in or drop missing values as part of data preparation. Additionally, some columns like "Name" and "Ticket" may not be useful for analysis and can be dropped. Finally, we can also create new features like "FamilySize" based on "SibSp" and "Parch" columns to capture the size of the family traveling together. # Check for missing values train_df.isnull().sum() # # The output of the code shows the number of missing values in each column of the train_df DataFrame. As we can see, the Age, Cabin, and Embarked columns have missing values. # There are 177 missing values in the Age column, which could be important for our analysis as it may affect a passenger's chance of survival. There are also 687 missing values in the Cabin column, which is a large proportion of the data. The Embarked column has only 2 missing values, which is a relatively small number compared to the size of the dataset. # We will need to decide how to handle these missing values in our data preparation step. We may choose to impute missing values, drop columns with a large number of missing values, or drop rows with missing values depending on the context of the problem and the impact of missing data on our analysis. 
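# The paragraph above weighs imputing against dropping. A hedged sketch of the simplest alternative
# (median Age, most frequent Embarked) applied to a copy of the frame, shown only for comparison with the
# random-sampling imputation used in the next cell; train_df itself is left untouched.
train_df_simple = train_df.copy()
train_df_simple["Age"] = train_df_simple["Age"].fillna(train_df["Age"].median())
train_df_simple["Embarked"] = train_df_simple["Embarked"].fillna(train_df["Embarked"].mode()[0])
print(train_df_simple[["Age", "Embarked"]].isnull().sum())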
test_df.isnull().sum() # Let's update the age field with a representative sample of the data from test_df and train_df # Before updating the missing values in the Age column, we should consider the distribution of ages in our dataset. One approach to filling missing values is to use the mean or median value of the column, but this may not be appropriate if the distribution of ages is skewed or has outliers. # Here's an example code to replace the missing values in the Age column with a random sample from the available data: # Concatenate the train and test dataframes combined_df = pd.concat([train_df, test_df], ignore_index=True) # Replace missing values in the Age column with a random sample from available data age_mean = combined_df["Age"].mean() age_std = combined_df["Age"].std() age_null_count = combined_df["Age"].isnull().sum() # Generate random ages between mean-std and mean+std age_null_random_list = np.random.randint( age_mean - age_std, age_mean + age_std, size=age_null_count ) # Fill missing values with the random values generated combined_df.loc[np.isnan(combined_df["Age"]), "Age"] = age_null_random_list # Split the data back into train and test sets train_df = combined_df.iloc[: len(train_df), :] test_df = combined_df.iloc[len(train_df) :, :] # This code will replace missing values in the Age column with a random sample from the available data, based on the mean and standard deviation of the Age column. We concatenate the train_df and test_df dataframes to ensure that we are using the same distribution of ages in both sets. Then we generate random ages between the mean minus the standard deviation and the mean plus the standard deviation, and replace the missing values with these random values. Finally, we split the data back into train and test sets. # Note that this is just one approach to handling missing values in the Age column, and there are many other techniques that could be used depending on the context of the problem. # Looking for Outliers in Fare sns.boxplot(x=train_df["Fare"]) q1 = np.percentile(train_df["Fare"], 25) q3 = np.percentile(train_df["Fare"], 75) iqr = q3 - q1 upper_bound = q3 + 1.5 * iqr print("The upper bound for Fare is:", upper_bound) print( "Number of fares above the upper bound:", len(train_df[train_df["Fare"] > upper_bound]), ) train_df["CabinLetter"] = train_df["Cabin"].str[0] test_df["CabinLetter"] = test_df["Cabin"].str[0] # Create pivot table to show average Fare for each Cabin cabin_fare = train_df.pivot_table(index="Cabin", values="Fare", aggfunc=np.mean) cabin_fare sns.boxplot(x="CabinLetter", y="Fare", data=train_df) # Based on the code output, it looks like there are 116 fares in the train_df DataFrame that are above the upper bound of 65.6344. These fares may be considered potential outliers, and you can decide how to handle them based on your problem context. # One approach to handle outliers is to remove them from the dataset. However, it's important to note that removing outliers can also remove important information from the data. Another approach is to cap the outliers to the maximum value within a reasonable range, for example, you can cap fares above 100 or 200 to 100 or 200, respectively. # You may also want to further investigate these high fares to see if there are any patterns or explanations for them. For example, high fares may be associated with a particular ticket class or cabin location. Understanding these patterns can help you make informed decisions about how to handle outliers in your data. 
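# The discussion above mentions capping fares instead of dropping them. A minimal sketch of IQR-based
# capping using the upper_bound computed in this cell; the result is kept in a separate series so the
# original Fare column (and the later preprocessing) is not affected.
fare_capped = train_df["Fare"].clip(upper=upper_bound)
print(fare_capped.describe())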
sns.boxplot(x="Pclass", y="Fare", data=train_df) high_fare = train_df[train_df["Fare"] > 200] high_fare # Well it seems the Fare Outliers are Mainly in Cabin B for First Class. # > Cabin B on the Titanic was a part of the first-class accommodations on the ship. It was located on the Promenade, Bridge, and Shelter decks and contained a number of luxurious cabins for the wealthiest passengers aboard the ship. # Visualize the data sns.countplot(x="Survived", data=train_df) sns.countplot(x="Sex", hue="Survived", data=train_df) sns.countplot(x="Pclass", hue="Survived", data=train_df) sns.histplot(x="Age", hue="Survived", data=train_df) sns.histplot(x="Age", hue="Survived", data=train_df) # Feature engineering train_df.loc[:, "Title"] = train_df["Name"].apply( lambda x: x.split(",")[1].split(".")[0].strip() ) train_df.loc[:, "FamilySize"] = train_df["SibSp"] + train_df["Parch"] + 1 test_df.loc[:, "Title"] = test_df["Name"].apply( lambda x: x.split(",")[1].split(".")[0].strip() ) test_df.loc[:, "FamilySize"] = test_df["SibSp"] + test_df["Parch"] + 1 # These lines of code create two new features in the train_df DataFrame: # Title: This feature extracts the title from the Name column, such as "Mr", "Mrs", "Miss", etc. The lambda function splits the name by comma, selects the second element (which contains the title), splits it by dot, and selects the first element (which is the title itself). The strip() method removes any extra spaces. # FamilySize: This feature calculates the total number of family members (including the passenger) by summing the SibSp (number of siblings/spouses) and Parch (number of parents/children) columns, and adding 1 to include the passenger themselves. # These features could potentially provide more information for the machine learning model to predict survival, as they capture some additional characteristics of the passengers beyond the original columns. # Data preprocessing train_df = pd.get_dummies(train_df, columns=["Sex", "Embarked"]) train_df = train_df.drop( ["PassengerId", "Name", "Cabin", "Ticket", "Title", "CabinLetter"], axis=1 ) train_df["Fare"] = train_df["Fare"].fillna(train_df["Fare"].median()) test_df = pd.get_dummies(test_df, columns=["Sex", "Embarked"]) test_df = test_df.drop( ["PassengerId", "Name", "Cabin", "Ticket", "Title", "CabinLetter"], axis=1 ) test_df["Fare"] = test_df["Fare"].fillna(test_df["Fare"].median()) # Now we have split the data into training and validation sets using the train_test_split function from the sklearn.model_selection module. # Here, X_train and y_train contain the features and target variables respectively for the training set, while X_valid and y_valid contain the features and target variables respectively for the validation set. # The test_size parameter specifies the proportion of data that should be split into the validation set, while the random_state parameter ensures reproducibility of the split. # It's always a good practice to split the data into training and validation sets to avoid overfitting. Now, you can move on to the next step, which is data preprocessing. # Split the data from sklearn.model_selection import train_test_split X_train, X_valid, y_train, y_valid = train_test_split( train_df.drop("Survived", axis=1), train_df["Survived"], test_size=0.2, random_state=42, ) # Great! You have split the data into training and validation sets using the train_test_split function from the sklearn.model_selection module. 
# Here, X_train and y_train contain the features and target variables respectively for the training set, while X_valid and y_valid contain the features and target variables respectively for the validation set. # The test_size parameter specifies the proportion of data that should be split into the validation set, while the random_state parameter ensures reproducibility of the split. # It's always a good practice to split the data into training and validation sets to avoid overfitting. Now, you can move on to the next step, which is data preprocessing. # Before we proceed to model selection, let's briefly discuss what machine learning models are suitable for this binary classification problem. # Since the task is to predict whether a passenger survived or not, we need a model that can classify an input as either 0 (did not survive) or 1 (survived). Some of the popular classification algorithms for this problem are: # - Logistic Regression # - K-Nearest Neighbors (KNN) # - Decision Trees # - Random Forest # - Support Vector Machines (SVM) # - Naive Bayes # - Neural Networks # You can start with simpler models like Logistic Regression and KNN and gradually move on to more complex models like Random Forest, SVM, and Neural Networks if needed. It's always a good practice to start with simpler models to establish a baseline performance. # Now, let's move on to model selection. To start with, let's train a Logistic Regression model and evaluate its performance on the validation set. # Logistic Regression # from sklearn.linear_model import LogisticRegression # create a logistic regression model lr_model = LogisticRegression() # train the model on the training set lr_model.fit(X_train, y_train) # predict on the validation set lr_preds = lr_model.predict(X_valid) # evaluate the performance of the model on the validation set from sklearn.metrics import accuracy_score lr_acc = accuracy_score(y_valid, lr_preds) print("Logistic Regression accuracy: {:.2f}%".format(lr_acc * 100)) # K-Nearest Neighbors (KNN) # from sklearn.neighbors import KNeighborsClassifier # create a KNN model knn_model = KNeighborsClassifier(n_neighbors=5) # train the model on the training set knn_model.fit(X_train, y_train) # predict on the validation set knn_preds = knn_model.predict(X_valid) # evaluate the performance of the model on the validation set knn_acc = accuracy_score(y_valid, knn_preds) print("KNN accuracy: {:.2f}%".format(knn_acc * 100)) # Decision Trees # from sklearn.tree import DecisionTreeClassifier # create a Decision Tree model dt_model = DecisionTreeClassifier(max_depth=3) # train the model on the training set dt_model.fit(X_train, y_train) # predict on the validation set dt_preds = dt_model.predict(X_valid) # evaluate the performance of the model on the validation set dt_acc = accuracy_score(y_valid, dt_preds) print("Decision Tree accuracy: {:.2f}%".format(dt_acc * 100)) # Random Forest # from sklearn.ensemble import RandomForestClassifier # create a Random Forest model rf_model = RandomForestClassifier(n_estimators=100, max_depth=5) # train the model on the training set rf_model.fit(X_train, y_train) # predict on the validation set rf_preds = rf_model.predict(X_valid) # evaluate the performance of the model on the validation set rf_acc = accuracy_score(y_valid, rf_preds) print("Random Forest accuracy: {:.2f}%".format(rf_acc * 100)) # Support Vector Machines (SVM) # from sklearn.svm import SVC # create a SVM model svm_model = SVC(kernel="rbf") # train the model on the training set svm_model.fit(X_train, y_train) 
# predict on the validation set svm_preds = svm_model.predict(X_valid) # evaluate the performance of the model on the validation set svm_acc = accuracy_score(y_valid, svm_preds) print("SVM accuracy: {:.2f}%".format(svm_acc * 100)) # Naive Bayes # from sklearn.naive_bayes import GaussianNB # create a Naive Bayes model nb_model = GaussianNB() # train the model on the training set nb_model.fit(X_train, y_train) # predict on the validation set nb_preds = nb_model.predict(X_valid) # evaluate the performance of the model on the validation set nb_acc = accuracy_score(y_valid, nb_preds) print("Naive Bayes accuracy: {:.2f}%".format(nb_acc * 100)) # Neural Networks # from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense # Build the model model = Sequential() model.add(Dense(64, input_dim=X_train.shape[1], activation="relu")) model.add(Dense(1, activation="sigmoid")) # Compile the model model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # Train the model history = model.fit( X_train, y_train, epochs=50, batch_size=32, validation_data=(X_valid, y_valid) ) # Evaluate the model on the validation data (the Kaggle test set has no Survived labels) val_loss, val_acc = model.evaluate(X_valid, y_valid) print("Validation accuracy:", val_acc) from tensorflow import keras from tensorflow.keras import layers # Define the model architecture model = keras.Sequential( [ layers.Dense(64, activation="relu", input_shape=(X_train.shape[1],)), layers.Dropout(0.2), layers.Dense(32, activation="relu"), layers.Dropout(0.2), layers.Dense(1, activation="sigmoid"), ] ) # Compile the model model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]) # Train the model history = model.fit( X_train, y_train, epochs=50, batch_size=32, validation_data=(X_valid, y_valid) ) # Evaluate the model on the validation data val_loss, val_acc = model.evaluate(X_valid, y_valid) print("Validation accuracy:", val_acc) # Build the model model = Sequential() model.add(Dense(64, input_dim=X_train.shape[1], activation="relu")) model.add(Dense(1, activation="sigmoid")) # Compile the model model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # Train the model history = model.fit( X_train, y_train, epochs=50, batch_size=32, validation_data=(X_valid, y_valid), ) # Evaluate the model on validation data val_loss, val_acc = model.evaluate(X_valid, y_valid) print("Validation accuracy:", val_acc) from sklearn.model_selection import cross_validate from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.naive_bayes import GaussianNB from sklearn.neural_network import MLPClassifier from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score # Define the models models = [ ("Logistic Regression", LogisticRegression()), ("K-Nearest Neighbors", KNeighborsClassifier()), ("Decision Tree", DecisionTreeClassifier()), ("Random Forest", RandomForestClassifier()), ("Support Vector Machine", SVC()), ("Naive Bayes", GaussianNB()), ( "Neural Network", MLPClassifier( hidden_layer_sizes=(64,), activation="relu", solver="adam", max_iter=50 ), ), ] # Evaluate each model using cross-validation results = [] for name, model in models: scores = cross_validate( model, X_train, y_train, cv=5, 
scoring=("accuracy", "precision", "recall", "f1") ) results.append((name, scores)) # Print the results for name, result in results: print(f"{name}:") print(f'Accuracy: {result["test_accuracy"].mean():.3f}') print(f'Precision: {result["test_precision"].mean():.3f}') print(f'Recall: {result["test_recall"].mean():.3f}') print(f'F1 score: {result["test_f1"].mean():.3f}\n') # Based on the evaluation results, it appears that the Random Forest model has the highest accuracy and F1 score. So, you may want to select the Random Forest model for further analysis and potentially use it for predicting the survival of passengers in the test dataset. However, it's always a good idea to consider other factors such as model complexity, interpretability, and business requirements before making a final decision. # First, we will import the necessary modules and initialize the model. Then, we will define the hyperparameter grid and use GridSearchCV to find the best hyperparameters for the model. Finally, we will fit the optimized model to the training data and evaluate its performance on the validation data. # Here's the code: from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier # Initialize the model rfc = RandomForestClassifier(random_state=42) # Define the hyperparameter grid param_grid = { "n_estimators": [50, 100, 200], "max_depth": [5, 10, 15, 20, None], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 4], } # Perform Grid Search to find the best hyperparameters grid_search = GridSearchCV(rfc, param_grid=param_grid, cv=5, n_jobs=-1) grid_search.fit(X_train, y_train) # Print the best hyperparameters print("Best Hyperparameters:", grid_search.best_params_) # Fit the optimized model to the training data rfc = RandomForestClassifier(**grid_search.best_params_, random_state=42) rfc.fit(X_train, y_train) # Evaluate the model on the validation data y_pred = rfc.predict(X_valid) accuracy = accuracy_score(y_valid, y_pred) precision = precision_score(y_valid, y_pred) recall = recall_score(y_valid, y_pred) f1 = f1_score(y_valid, y_pred) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1) # Re-read the raw training data and read in the real test data so the submission is generated for the unseen test set train_df = pd.read_csv("/kaggle/input/titanic/train.csv") test_df = pd.read_csv("/kaggle/input/titanic/test.csv") y = train_df["Survived"] features = ["Pclass", "Sex", "SibSp", "Parch"] X = pd.get_dummies(train_df[features]) X_test = pd.get_dummies(test_df[features]) rfc.fit(X, y) predictions = rfc.predict(X_test) output = pd.DataFrame({"PassengerId": test_df.PassengerId, "Survived": predictions}) output.to_csv("submission.csv", index=False) print("Your submission was successfully saved!") output
false
0
6,630
0
6,630
6,630
129320941
import numpy as np import cv2 import os import re import glob import matplotlib.pyplot as plt import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.utils import np_utils from keras import backend as K from keras.optimizers import SGD from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report from keras.models import load_model from keras.preprocessing.image import ImageDataGenerator from keras.layers import BatchNormalization import tensorflow as tf from tensorflow.keras import datasets, layers, models data = { "AnnualCrop": 0, "HerbaceousVegetation": 1, "Industrial": 2, "PermanentCrop": 3, "River": 4, "Forest": 5, "Highway": 6, "Pasture": 7, "Residential": 8, "SeaLake": 9, } data = { "Pasture": 0, "Forest": 1, "Highway": 2, "AnnualCrop": 3, "Residential": 4, "HerbaceousVegetation": 5, "PermanentCrop": 6, "SeaLake": 7, "River": 8, "Industrial": 9, } groups = list(data.keys()) values = list(data.values()) tc = [] dir_path = "EuroSAT_RGB/" for i in range(0, len(groups)): count = 0 for path in os.listdir(dir_path + groups[i]): count += 1 tc.append(count) count = 0 print(tc) plt.bar(values, tc, color="blue", width=0.4) plt.xlabel("Group labels") plt.ylabel("No. of images in each group") plt.show() path = "EuroSAT_RGB/" dirs = os.listdir("EuroSAT_RGB/") label = 0 im_arr = [] lb_arr = [] X = [] y = [] for i in dirs: count = 0 for pic in glob.glob(path + i + "/*.jpg"): im = cv2.imread(pic) im = cv2.resize(im, (32, 32)) im = np.array(im) count = count + 1 X.append(im) y.append(label) if count == 3: im_arr.append({str(i): im}) # print("size "+str(i)+" : "+str(count)) label = label + 1 lb_arr.append(i) X = np.array(X) y = np.array(y) print(X.shape) y print(y.shape) x_train, x_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) x_train = x_train.astype("float32") x_test = x_test.astype("float32") x_train /= 255 x_test /= 255 print(x_train.shape) print(y_train.shape) i = 0 from keras.applications import VGG19 base_model = VGG19(weights="imagenet", include_top=False, input_shape=(32, 32, 3)) for layer in base_model.layers: trainable = False model = Sequential( [ base_model, Flatten(), Dense(128, activation="relu"), Dropout(0.5), Dense(10, activation="softmax"), ] ) model.summary() def eval_prioritization_strategy(prioritizer): train_indices = range(21600) test_accuracies = [] x_train_subset = np.zeros([0, 32, 32, 3]) y_train_subset = np.empty( [ 0, ] ) for i in range(11): selected_indices = train_indices[0:1000] train_indices = train_indices[1000:] x_train_subset = np.concatenate( (x_train_subset, x_train[selected_indices, ...]) ) y_train_subset = np.concatenate( (y_train_subset, y_train[selected_indices, ...]) ) epochs = 70 lrate = 0.01 decay = lrate / epochs sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False) model.compile( loss="sparse_categorical_crossentropy", optimizer=sgd, metrics=["accuracy"] ) print( "********************************************************* ITERATION ", i ) # i=i+1 print("y shape is", y_train_subset.shape) model.fit( x_train_subset, y_train_subset, validation_data=(x_test, y_test), epochs=epochs, batch_size=32, verbose=0, ) loss, accuracy = model.evaluate(x_test, y_test, verbose=0) test_accuracies.append(accuracy) print( "Training data size of %d => accuracy %f" % (x_train_subset.shape[0], accuracy) ) predictions = model.predict(x_train[train_indices, ...]) train_indices = 
prioritizer(train_indices, predictions) return test_accuracies y_train = y_train.reshape( -1, ) y_test = y_test.reshape( -1, ) print(y_train.shape) y_train def least_confidence_prediction_prioritizer(indices, predictions): max_logit = list(zip(indices, np.amax(predictions, axis=1))) max_logit.sort(key=lambda x: x[1]) # sort in ascending order return list(zip(*max_logit))[0] least_confidence_accuracies = eval_prioritization_strategy( least_confidence_prediction_prioritizer ) # least_confidence_accuracies # [0.5561110973358154, # 0.7485185265541077, # 0.8077777624130249, # 0.8383333086967468, # 0.8566666841506958, # 0.8698148131370544, # 0.8775925636291504, # 0.8829629421234131, # 0.8901851773262024, # 0.882777750492096, # 0.8901851773262024] plt.plot(least_confidence_accuracies, "b", label="least confidence") plt.legend() # Entropy method base_model = VGG19(weights="imagenet", include_top=False, input_shape=(32, 32, 3)) for layer in base_model.layers: layer.trainable = False # freeze the pretrained backbone model = Sequential( [ base_model, Flatten(), Dense(128, activation="relu"), Dropout(0.5), Dense(10, activation="softmax"), ] ) def entropy_prioritizer(indices, predictions): p = predictions * np.log(predictions + 1e-12) # small epsilon avoids log(0) for very confident predictions p = -p.sum(axis=1) p = list(zip(indices, p)) p.sort(reverse=True, key=lambda x: x[1]) # sort in descending order return list(zip(*p))[0] entropy_prioritized_accuracies = eval_prioritization_strategy(entropy_prioritizer) plt.plot(least_confidence_accuracies, "b", label="least confidence") plt.plot(entropy_prioritized_accuracies, "g", label="highest entropy") plt.legend() def margin_prioritizer(indices, predictions): p = -np.sort(-predictions) # sort in descending order p = p[:, 0] - p[:, 1] p = list(zip(indices, p)) p.sort(key=lambda x: x[1]) # sort in ascending order return list(zip(*p))[0] margin_prioritized_accuracies = eval_prioritization_strategy(margin_prioritizer) plt.plot(least_confidence_accuracies, "b", label="least confidence") plt.plot(entropy_prioritized_accuracies, "g", label="highest entropy") plt.plot(margin_prioritized_accuracies, "r", label="Margin sampling") plt.legend()
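# Active-learning curves are usually judged against a random-selection baseline, which this notebook does not
# include. A hedged sketch of such a baseline reusing the eval_prioritization_strategy() helper defined above;
# the function and variable names are illustrative, and the run lines are left commented because each call
# retrains the model for many epochs.
def random_prioritizer(indices, predictions):
    shuffled = np.array(indices)
    np.random.shuffle(shuffled)
    return tuple(shuffled)  # same return type as the other prioritizers
# random_baseline_accuracies = eval_prioritization_strategy(random_prioritizer)
# plt.plot(random_baseline_accuracies, "k", label="random baseline")
# plt.legend()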
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/320/129320941.ipynb
null
null
[{"Id": 129320941, "ScriptId": 38397976, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9254416, "CreationDate": "05/12/2023 18:47:02", "VersionNumber": 2.0, "Title": "Active Learning", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 222.0, "LinesInsertedFromPrevious": 18.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 204.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np import cv2 import os import re import glob import matplotlib.pyplot as plt import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.utils import np_utils from keras import backend as K from keras.optimizers import SGD from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report from keras.models import load_model from keras.preprocessing.image import ImageDataGenerator from keras.layers import BatchNormalization import tensorflow as tf from tensorflow.keras import datasets, layers, models data = { "AnnualCrop": 0, "HerbaceousVegetation": 1, "Industrial": 2, "PermanentCrop": 3, "River": 4, "Forest": 5, "Highway": 6, "Pasture": 7, "Residential": 8, "SeaLake": 9, } data = { "Pasture": 0, "Forest": 1, "Highway": 2, "AnnualCrop": 3, "Residential": 4, "HerbaceousVegetation": 5, "PermanentCrop": 6, "SeaLake": 7, "River": 8, "Industrial": 9, } groups = list(data.keys()) values = list(data.values()) tc = [] dir_path = "EuroSAT_RGB/" for i in range(0, len(groups)): count = 0 for path in os.listdir(dir_path + groups[i]): count += 1 tc.append(count) count = 0 print(tc) plt.bar(values, tc, color="blue", width=0.4) plt.xlabel("Group labels") plt.ylabel("No. of images in each group") plt.show() path = "EuroSAT_RGB/" dirs = os.listdir("EuroSAT_RGB/") label = 0 im_arr = [] lb_arr = [] X = [] y = [] for i in dirs: count = 0 for pic in glob.glob(path + i + "/*.jpg"): im = cv2.imread(pic) im = cv2.resize(im, (32, 32)) im = np.array(im) count = count + 1 X.append(im) y.append(label) if count == 3: im_arr.append({str(i): im}) # print("size "+str(i)+" : "+str(count)) label = label + 1 lb_arr.append(i) X = np.array(X) y = np.array(y) print(X.shape) y print(y.shape) x_train, x_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) x_train = x_train.astype("float32") x_test = x_test.astype("float32") x_train /= 255 x_test /= 255 print(x_train.shape) print(y_train.shape) i = 0 from keras.applications import VGG19 base_model = VGG19(weights="imagenet", include_top=False, input_shape=(32, 32, 3)) for layer in base_model.layers: trainable = False model = Sequential( [ base_model, Flatten(), Dense(128, activation="relu"), Dropout(0.5), Dense(10, activation="softmax"), ] ) model.summary() def eval_prioritization_strategy(prioritizer): train_indices = range(21600) test_accuracies = [] x_train_subset = np.zeros([0, 32, 32, 3]) y_train_subset = np.empty( [ 0, ] ) for i in range(11): selected_indices = train_indices[0:1000] train_indices = train_indices[1000:] x_train_subset = np.concatenate( (x_train_subset, x_train[selected_indices, ...]) ) y_train_subset = np.concatenate( (y_train_subset, y_train[selected_indices, ...]) ) epochs = 70 lrate = 0.01 decay = lrate / epochs sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False) model.compile( loss="sparse_categorical_crossentropy", optimizer=sgd, metrics=["accuracy"] ) print( "********************************************************* ITERATION ", i ) # i=i+1 print("y shape is", y_train_subset.shape) model.fit( x_train_subset, y_train_subset, validation_data=(x_test, y_test), epochs=epochs, batch_size=32, verbose=0, ) loss, accuracy = model.evaluate(x_test, y_test, verbose=0) test_accuracies.append(accuracy) print( "Training data size of %d => accuracy %f" % (x_train_subset.shape[0], accuracy) ) predictions = model.predict(x_train[train_indices, ...]) train_indices = 
prioritizer(train_indices, predictions) return test_accuracies y_train = y_train.reshape( -1, ) y_test = y_test.reshape( -1, ) print(y_train.shape) y_train def least_confidence_prediction_prioritizer(indices, predictions): max_logit = list(zip(indices, np.amax(predictions, axis=1))) max_logit.sort(key=lambda x: x[1]) # sort in ascending order return list(zip(*max_logit))[0] least_confidence_accuracies = eval_prioritization_strategy( least_confidence_prediction_prioritizer ) # least_confidence_accuracies # [0.5561110973358154, # 0.7485185265541077, # 0.8077777624130249, # 0.8383333086967468, # 0.8566666841506958, # 0.8698148131370544, # 0.8775925636291504, # 0.8829629421234131, # 0.8901851773262024, # 0.882777750492096, # 0.8901851773262024] plt.plot(least_confidence_accuracies, "b", label="least confidence") plt.legend() # Entropy method base_model = VGG19(weights="imagenet", include_top=False, input_shape=(32, 32, 3)) for layer in base_model.layers: layer.trainable = False # freeze the pretrained backbone model = Sequential( [ base_model, Flatten(), Dense(128, activation="relu"), Dropout(0.5), Dense(10, activation="softmax"), ] ) def entropy_prioritizer(indices, predictions): p = predictions * np.log(predictions + 1e-12) # small epsilon avoids log(0) for very confident predictions p = -p.sum(axis=1) p = list(zip(indices, p)) p.sort(reverse=True, key=lambda x: x[1]) # sort in descending order return list(zip(*p))[0] entropy_prioritized_accuracies = eval_prioritization_strategy(entropy_prioritizer) plt.plot(least_confidence_accuracies, "b", label="least confidence") plt.plot(entropy_prioritized_accuracies, "g", label="highest entropy") plt.legend() def margin_prioritizer(indices, predictions): p = -np.sort(-predictions) # sort in descending order p = p[:, 0] - p[:, 1] p = list(zip(indices, p)) p.sort(key=lambda x: x[1]) # sort in ascending order return list(zip(*p))[0] margin_prioritized_accuracies = eval_prioritization_strategy(margin_prioritizer) plt.plot(least_confidence_accuracies, "b", label="least confidence") plt.plot(entropy_prioritized_accuracies, "g", label="highest entropy") plt.plot(margin_prioritized_accuracies, "r", label="Margin sampling") plt.legend()
false
0
2,240
0
2,240
2,240
129320836
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import time from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.svm import SVC from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split # Load the dataset data = pd.read_csv( "/kaggle/input/yearpredictionmsdtxt/YearPredictionMSD.txt", header=None ) len(data) data = data.sample(frac=0.2) data = data.dropna(subset=[0]) # 0 is the column number of the target variable # Preprocessing # The target column in this dataset is the first column (year), let's categorize it into three categories data.iloc[:, 0] = pd.cut( data.iloc[:, 0], bins=[1922, 1970, 2000, 2011], labels=["Old", "Middle", "New"] ) # Separate features and target X = data.iloc[:, 1:] y = data.iloc[:, 0] # Standardize the features scaler = StandardScaler() X = scaler.fit_transform(X) # Split the dataset into training set and test set X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) X_train = X_train[y_train.notna()] y_train = y_train.dropna() classifiers = { "Logistic Regression": LogisticRegression(max_iter=5000), "Random Forest": RandomForestClassifier(), "Gradient Boosting": GradientBoostingClassifier(), } import pandas as pd import matplotlib.pyplot as plt # Initialize a DataFrame to store results results = pd.DataFrame(columns=["PCA_Components", "Classifier", "Accuracy", "Time"]) # Perform PCA and classification for n_components in range(X.shape[1] // 10, X.shape[1] + 1, X.shape[1] // 10): if n_components > 36: break print(f"Number of PCA components: {n_components}") pca = PCA(n_components=n_components) X_train_pca = pca.fit_transform(X_train) X_test_pca = pca.transform(X_test) for classifier_name, classifier in classifiers.items(): start_time = time.time() classifier.fit(X_train_pca, y_train) end_time = time.time() y_pred = classifier.predict(X_test_pca) accuracy = accuracy_score(y_test, y_pred) print( f"Classifier: {classifier_name}, Accuracy: {accuracy:.4f}, Time: {end_time - start_time:.4f} seconds" ) # Add the results to the DataFrame results = results.append( { "PCA_Components": n_components, "Classifier": classifier_name, "Accuracy": accuracy, "Time": end_time - start_time, }, ignore_index=True, ) print() # Plot the results fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14, 7)) # Accuracy plot for classifier_name in classifiers.keys(): subset = results[results["Classifier"] == classifier_name] axes[0].plot( subset["PCA_Components"], subset["Accuracy"], label=f"{classifier_name}" ) axes[0].set_xlabel("Number of PCA components") axes[0].set_ylabel("Accuracy") axes[0].legend() axes[0].set_title("Accuracy Plot") axes[0].grid(True) # Time plot for 
classifier_name in classifiers.keys(): subset = results[results["Classifier"] == classifier_name] axes[1].plot(subset["PCA_Components"], subset["Time"], label=f"{classifier_name}") axes[1].set_xlabel("Number of PCA components") axes[1].set_ylabel("Time (seconds)") axes[1].legend() axes[1].set_title("Time Plot") axes[1].grid(True) plt.tight_layout() plt.show()
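# The sweep above fixes its candidate component counts up front. A small sketch of how the cumulative
# explained-variance ratio could guide that choice instead, fitted on the same training split; the 95%
# threshold is an illustrative assumption, not something derived from this dataset.
pca_full = PCA().fit(X_train)
cum_var = np.cumsum(pca_full.explained_variance_ratio_)
n_components_95 = int(np.argmax(cum_var >= 0.95)) + 1
print(f"Components needed for 95% of the variance: {n_components_95}")
plt.figure(figsize=(7, 4))
plt.plot(cum_var)
plt.axhline(0.95, linestyle="--", color="gray")
plt.xlabel("Number of PCA components")
plt.ylabel("Cumulative explained variance")
plt.grid(True)
plt.show()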
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/320/129320836.ipynb
null
null
[{"Id": 129320836, "ScriptId": 38449339, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3637157, "CreationDate": "05/12/2023 18:45:43", "VersionNumber": 1.0, "Title": "Task2_Nihad&Yusif", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 134.0, "LinesInsertedFromPrevious": 134.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import time from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.svm import SVC from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split # Load the dataset data = pd.read_csv( "/kaggle/input/yearpredictionmsdtxt/YearPredictionMSD.txt", header=None ) len(data) data = data.sample(frac=0.2) data = data.dropna(subset=[0]) # 0 is the column number of the target variable # Preprocessing # The target column in this dataset is the first column (year), let's categorize it into three categories data.iloc[:, 0] = pd.cut( data.iloc[:, 0], bins=[1922, 1970, 2000, 2011], labels=["Old", "Middle", "New"] ) # Separate features and target X = data.iloc[:, 1:] y = data.iloc[:, 0] # Standardize the features scaler = StandardScaler() X = scaler.fit_transform(X) # Split the dataset into training set and test set X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) X_train = X_train[y_train.notna()] y_train = y_train.dropna() classifiers = { "Logistic Regression": LogisticRegression(max_iter=5000), "Random Forest": RandomForestClassifier(), "Gradient Boosting": GradientBoostingClassifier(), } import pandas as pd import matplotlib.pyplot as plt # Initialize a DataFrame to store results results = pd.DataFrame(columns=["PCA_Components", "Classifier", "Accuracy", "Time"]) # Perform PCA and classification for n_components in range(X.shape[1] // 10, X.shape[1] + 1, X.shape[1] // 10): if n_components > 36: break print(f"Number of PCA components: {n_components}") pca = PCA(n_components=n_components) X_train_pca = pca.fit_transform(X_train) X_test_pca = pca.transform(X_test) for classifier_name, classifier in classifiers.items(): start_time = time.time() classifier.fit(X_train_pca, y_train) end_time = time.time() y_pred = classifier.predict(X_test_pca) accuracy = accuracy_score(y_test, y_pred) print( f"Classifier: {classifier_name}, Accuracy: {accuracy:.4f}, Time: {end_time - start_time:.4f} seconds" ) # Add the results to the DataFrame results = results.append( { "PCA_Components": n_components, "Classifier": classifier_name, "Accuracy": accuracy, "Time": end_time - start_time, }, ignore_index=True, ) print() # Plot the results fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14, 7)) # Accuracy plot for classifier_name in classifiers.keys(): subset = results[results["Classifier"] == classifier_name] axes[0].plot( subset["PCA_Components"], subset["Accuracy"], label=f"{classifier_name}" ) axes[0].set_xlabel("Number of PCA components") axes[0].set_ylabel("Accuracy") axes[0].legend() axes[0].set_title("Accuracy Plot") axes[0].grid(True) # Time plot for 
classifier_name in classifiers.keys(): subset = results[results["Classifier"] == classifier_name] axes[1].plot(subset["PCA_Components"], subset["Time"], label=f"{classifier_name}") axes[1].set_xlabel("Number of PCA components") axes[1].set_ylabel("Time (seconds)") axes[1].legend() axes[1].set_title("Time Plot") axes[1].grid(True) plt.tight_layout() plt.show()
false
0
1,214
0
1,214
1,214
129320733
import optuna from optuna import Trial, visualization import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, roc_curve, roc_auc_score, log_loss from sklearn.model_selection import GridSearchCV import xgboost as xgb from sklearn.metrics import mean_squared_error from ydata_profiling import ProfileReport train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") ProfileReport( train, title="train.csv analysis", progress_bar=True, interactions=None, explorative=True, dark_mode=True, notebook={"iframe": {"height": "600px"}}, missing_diagrams={"heatmap": False, "dendrogram": False}, ).to_notebook_iframe() train.columns FEATURES = [ "AB", "AF", "AH", "AM", "AR", "AX", "AY", "AZ", "BC", "BD ", "BN", "BP", "BQ", "BR", "BZ", "CB", "CC", "CD ", "CF", "CH", "CL", "CR", "CS", "CU", "CW ", "DA", "DE", "DF", "DH", "DI", "DL", "DN", "DU", "DV", "DY", "EB", "EE", "EG", "EH", "EJ", "EL", "EP", "EU", "FC", "FD ", "FE", "FI", "FL", "FR", "FS", "GB", "GE", "GF", "GH", "GI", "GL", ] TARGET = "Class" # Data Cleaning train = train.dropna() train["EJ"] = pd.Series(np.where(train.EJ.values == "A", 1, 0), train.index) test["EJ"] = pd.Series(np.where(test.EJ.values == "A", 1, 0), test.index) # Model # splitting train and validation # Splitting training set - startified to get a good repartition of Attrition everywhere x = train[FEATURES] y = train[TARGET] x_train, x_validation, y_train, y_validation = train_test_split( x, y, test_size=0.05, stratify=y ) # defining an xgboost classifer xg_classifier = xgb.XGBClassifier( objective="multi:softmax", num_class=2, tree_method="hist", n_estimators=2000, learning_rate=0.0075, reg_lambda=0.3, reg_alpha=0.2, max_leaves=17, subsample=0.50, colsample_bytree=0.50, max_bin=4096, n_jobs=2, early_stopping_rounds=70, ) xg_classifier.fit( x_train, y_train, eval_set=[(x_train, y_train), (x_validation, y_validation)], verbose=200, ) fi = pd.DataFrame( data=xg_classifier.feature_importances_, index=xg_classifier.feature_names_in_, columns=["FeatureImportance"], ).sort_values(by="FeatureImportance", ascending=False) fi.plot.bar(title="Feature Importance", figsize=(10, 5)) plt.grid() plt.show() log_loss( y_validation, [el[1] for el in xg_classifier.predict_proba(x_validation)], ) def objective(trial, data=x, target=y): train_x, x_validation, train_y, y_validation = train_test_split( data, target, test_size=0.15, random_state=42 ) param = { "objective": "multi:softmax", "num_class": 2, "max_depth": trial.suggest_int("max_depth", 1, 9), "learning_rate": trial.suggest_float("learning_rate", 0.01, 1.0), "early_stopping_rounds": trial.suggest_int("early_stopping_rounds", 50, 200), "subsample": trial.suggest_float("subsample", 0.01, 1.0), "colsample_bytree": trial.suggest_float("colsample_bytree", 0.01, 1.0), "eval_metric": "mlogloss", } model = xgb.XGBClassifier(**param) model.fit(train_x, train_y, eval_set=[(x_validation, y_validation)], verbose=False) lloss = log_loss( y_validation, [el[1] for el in model.predict_proba(x_validation)], ) return lloss study = optuna.create_study(direction="minimize") study.optimize(objective, n_trials=150) print("Number of finished trials:", len(study.trials)) print("Best trial:", study.best_trial.params) best_params = study.best_params 
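# The notebook imports optuna's visualization module but never uses it. A brief sketch of how the finished
# study could be inspected before refitting; these helpers return plotly figures, so rendering depends on the
# notebook environment.
visualization.plot_optimization_history(study).show()
visualization.plot_param_importances(study).show()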
clf = xgb.XGBClassifier(**(best_params)) clf.fit( x_train, y_train, eval_set=[(x_train, y_train), (x_validation, y_validation)], verbose=200, ) log_loss( y_validation, [el[1] for el in clf.predict_proba(x_validation)], ) predictions = clf.predict_proba(test[FEATURES]) sample_submission = pd.read_csv( "/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv" ) sample_submission[["class_0", "class_1"]] = predictions sample_submission.to_csv("submission.csv", index=False) sample_submission
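# The 5% validation split used above is very small, so a single log-loss value can be noisy. A hedged sketch
# of a stratified 5-fold estimate with the tuned parameters; early_stopping_rounds is dropped here because
# cross_val_predict provides no eval_set, and the cv_* names are illustrative.
from sklearn.model_selection import StratifiedKFold, cross_val_predict
cv_params = {k: v for k, v in best_params.items() if k != "early_stopping_rounds"}
cv_model = xgb.XGBClassifier(objective="multi:softmax", num_class=2, eval_metric="mlogloss", **cv_params)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
cv_proba = cross_val_predict(cv_model, x, y, cv=skf, method="predict_proba")
print("5-fold log loss:", log_loss(y, cv_proba[:, 1]))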
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/320/129320733.ipynb
null
null
[{"Id": 129320733, "ScriptId": 38440330, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3447147, "CreationDate": "05/12/2023 18:44:11", "VersionNumber": 2.0, "Title": "ICR - XGBoost | Optuna Optimised! 0.31", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 115.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 114.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import optuna from optuna import Trial, visualization import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, roc_curve, roc_auc_score, log_loss from sklearn.model_selection import GridSearchCV import xgboost as xgb from sklearn.metrics import mean_squared_error from ydata_profiling import ProfileReport train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") ProfileReport( train, title="train.csv analysis", progress_bar=True, interactions=None, explorative=True, dark_mode=True, notebook={"iframe": {"height": "600px"}}, missing_diagrams={"heatmap": False, "dendrogram": False}, ).to_notebook_iframe() train.columns FEATURES = [ "AB", "AF", "AH", "AM", "AR", "AX", "AY", "AZ", "BC", "BD ", "BN", "BP", "BQ", "BR", "BZ", "CB", "CC", "CD ", "CF", "CH", "CL", "CR", "CS", "CU", "CW ", "DA", "DE", "DF", "DH", "DI", "DL", "DN", "DU", "DV", "DY", "EB", "EE", "EG", "EH", "EJ", "EL", "EP", "EU", "FC", "FD ", "FE", "FI", "FL", "FR", "FS", "GB", "GE", "GF", "GH", "GI", "GL", ] TARGET = "Class" # Data Cleaning train = train.dropna() train["EJ"] = pd.Series(np.where(train.EJ.values == "A", 1, 0), train.index) test["EJ"] = pd.Series(np.where(test.EJ.values == "A", 1, 0), test.index) # Model # splitting train and validation # Splitting training set - startified to get a good repartition of Attrition everywhere x = train[FEATURES] y = train[TARGET] x_train, x_validation, y_train, y_validation = train_test_split( x, y, test_size=0.05, stratify=y ) # defining an xgboost classifer xg_classifier = xgb.XGBClassifier( objective="multi:softmax", num_class=2, tree_method="hist", n_estimators=2000, learning_rate=0.0075, reg_lambda=0.3, reg_alpha=0.2, max_leaves=17, subsample=0.50, colsample_bytree=0.50, max_bin=4096, n_jobs=2, early_stopping_rounds=70, ) xg_classifier.fit( x_train, y_train, eval_set=[(x_train, y_train), (x_validation, y_validation)], verbose=200, ) fi = pd.DataFrame( data=xg_classifier.feature_importances_, index=xg_classifier.feature_names_in_, columns=["FeatureImportance"], ).sort_values(by="FeatureImportance", ascending=False) fi.plot.bar(title="Feature Importance", figsize=(10, 5)) plt.grid() plt.show() log_loss( y_validation, [el[1] for el in xg_classifier.predict_proba(x_validation)], ) def objective(trial, data=x, target=y): train_x, x_validation, train_y, y_validation = train_test_split( data, target, test_size=0.15, random_state=42 ) param = { "objective": "multi:softmax", "num_class": 2, "max_depth": trial.suggest_int("max_depth", 1, 9), "learning_rate": trial.suggest_float("learning_rate", 0.01, 1.0), "early_stopping_rounds": trial.suggest_int("early_stopping_rounds", 50, 200), "subsample": trial.suggest_float("subsample", 0.01, 1.0), "colsample_bytree": trial.suggest_float("colsample_bytree", 0.01, 1.0), "eval_metric": "mlogloss", } model = xgb.XGBClassifier(**param) model.fit(train_x, train_y, eval_set=[(x_validation, y_validation)], verbose=False) lloss = log_loss( y_validation, [el[1] for el in model.predict_proba(x_validation)], ) return lloss study = optuna.create_study(direction="minimize") study.optimize(objective, n_trials=150) print("Number of finished trials:", len(study.trials)) print("Best trial:", study.best_trial.params) best_params = study.best_params 
clf = xgb.XGBClassifier(**(best_params)) clf.fit( x_train, y_train, eval_set=[(x_train, y_train), (x_validation, y_validation)], verbose=200, ) log_loss( y_validation, [el[1] for el in clf.predict_proba(x_validation)], ) predictions = clf.predict_proba(test[FEATURES]) sample_submission = pd.read_csv( "/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv" ) sample_submission[["class_0", "class_1"]] = predictions sample_submission.to_csv("submission.csv", index=False) sample_submission
false
0
1,551
0
1,551
1,551
129320703
<jupyter_start><jupyter_text>Football/Soccer | Bundesliga Player Database The Bundesliga Players dataset provides a comprehensive collection of information on every player in the German Bundesliga football league. From renowned goalkeepers to talented defenders, this dataset offers an extensive range of player details including their names, full names, ages, heights, nationalities, places of birth, prices, maximum prices, positions, shirt numbers, preferred foot, current clubs, contract expiration dates, dates of joining the clubs, player agents, and outfitters. Whether you're a passionate football fan, a sports analyst, or a fantasy football enthusiast, this dataset serves as a valuable resource for exploring and analyzing the profiles of Bundesliga players, enabling you to delve into their backgrounds, performance statistics, and club affiliations. Discover the stars of German football and gain insights into their careers with this comprehensive Bundesliga Players dataset. Kaggle dataset identifier: bundesliga-soccer-player <jupyter_script>import numpy as np import pandas as pd from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor df_players = pd.read_csv( "/kaggle/input/bundesliga-soccer-player/bundesliga_player.csv", index_col=[0] ) df_players.head() df_players.describe() df_players.info() # ### Variable Usefulness for Predicting Price # 1. Age: Age is likely to be a useful variable for predicting price as younger players generally have higher market values due to their potential for growth and longer career ahead. # 2. Height: Height might have some influence on the price as certain positions or playing styles may favor taller players. However, its impact on price may not be as significant compared to other variables. # 4. Club: Club affiliation is an important variable for predicting price. Players from high-profile clubs or clubs known for producing top talent are often valued more highly in the market. # 5. Position: Position is a crucial factor in determining price. Different positions have varying levels of demand and scarcity, leading to variations in market values. # 6. Contract Expiry Date: The remaining duration of a player's contract can impact their price. Players with longer contract terms may have higher values due to increased stability and reduced transfer urgency. # 7. Contract Start Date: The start date of a player's current contract may have less influence on predicting price compared to other variables. It is more indicative of the player's history with the club rather than their current market value. # 8. Agency/Representative: The player's agency or representative is not directly related to their market value. It is more of a logistical detail and does not provide significant insight into predicting price. # 9. Sponsorship Brand: The sponsorship brand associated with a player does not have a direct impact on their market value. While brand endorsements can increase a player's overall earnings, it may not be a significant factor in price prediction. # 10. Right/Left-Footed: A player's dominant foot is unlikely to have a substantial impact on their market value. It is more relevant to their playing style or preferred positions rather than predicting price. # 11. 
Max Price (Excluded): The "max price" variable should be excluded from the prediction model because it tracks the actual target variable we want to predict too closely. Including it as a feature would result in data leakage and lead to an overly optimistic evaluation of the model's performance. # Note: The above analysis is based on general assumptions and domain knowledge. It is recommended to validate the significance of these variables through statistical analysis and feature selection techniques specific to the dataset and prediction task at hand. # df_players = df_players[ [ "age", "height", "nationality", "foot", "position", "club", "contract_expires", "joined_club", "player_agent", "outfitter", "price", ] ] df_players["outfitter"] = df_players["outfitter"].replace(np.nan, "none") df_players["player_agent"] = df_players["player_agent"].replace(np.nan, "none") df_players.dropna(inplace=True) df_players.shape df_target = df_players[["price"]] df_features = df_players[ [ "age", "height", "foot", "position", "club", "contract_expires", "joined_club", "player_agent", "outfitter", ] ] for column in df_features.columns: unique_values = df_features[column].unique() print(f"Unique values in column '{column}': {unique_values}") columns_to_encode = [ "foot", "position", "club", "contract_expires", "joined_club", "player_agent", "outfitter", ] ct = ColumnTransformer( transformers=[("encoder", OneHotEncoder(), columns_to_encode)], remainder="passthrough", ) df_features_encoded = ct.fit_transform(df_features) df_features_encoded.shape x_train, x_test, y_train, y_test = train_test_split( df_features_encoded, df_target, test_size=0.3, random_state=0 ) rfr = RandomForestRegressor(n_estimators=100, random_state=1) rfr.fit(x_train, y_train) pred = rfr.predict(x_test) mse = mean_squared_error(y_test, pred) print(mse)
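Following up on the note above about validating these variables: one quick check, not in the original notebook, is to rank the encoded features by the fitted forest's impurity-based importances. This sketch assumes the cells above have been run (so ct and rfr are fitted) and scikit-learn >= 1.0, where ColumnTransformer.get_feature_names_out() is available; the top-15 cutoff is arbitrary.

import pandas as pd

# Map each encoded column back to a readable name and attach the forest's importances.
feature_names = ct.get_feature_names_out()
importances = pd.Series(rfr.feature_importances_, index=feature_names)

# If the reasoning above holds, age and the club dummies should rank near the top.
print(importances.sort_values(ascending=False).head(15))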
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/320/129320703.ipynb
bundesliga-soccer-player
oles04
[{"Id": 129320703, "ScriptId": 38447882, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12065933, "CreationDate": "05/12/2023 18:43:46", "VersionNumber": 2.0, "Title": "Price Prediction with Randome Forest", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 75.0, "LinesInsertedFromPrevious": 12.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 63.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185258612, "KernelVersionId": 129320703, "SourceDatasetVersionId": 5668174}]
[{"Id": 5668174, "DatasetId": 3258253, "DatasourceVersionId": 5743664, "CreatorUserId": 12065933, "LicenseName": "Other (specified in description)", "CreationDate": "05/12/2023 07:42:13", "VersionNumber": 1.0, "Title": "Football/Soccer | Bundesliga Player Database", "Slug": "bundesliga-soccer-player", "Subtitle": "Bundesliga Player Database: Complete Profiles, Stats, and Clubs of each Player", "Description": "The Bundesliga Players dataset provides a comprehensive collection of information on every player in the German Bundesliga football league. From renowned goalkeepers to talented defenders, this dataset offers an extensive range of player details including their names, full names, ages, heights, nationalities, places of birth, prices, maximum prices, positions, shirt numbers, preferred foot, current clubs, contract expiration dates, dates of joining the clubs, player agents, and outfitters. Whether you're a passionate football fan, a sports analyst, or a fantasy football enthusiast, this dataset serves as a valuable resource for exploring and analyzing the profiles of Bundesliga players, enabling you to delve into their backgrounds, performance statistics, and club affiliations. Discover the stars of German football and gain insights into their careers with this comprehensive Bundesliga Players dataset.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3258253, "CreatorUserId": 12065933, "OwnerUserId": 12065933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5668174.0, "CurrentDatasourceVersionId": 5743664.0, "ForumId": 3323776, "Type": 2, "CreationDate": "05/12/2023 07:42:13", "LastActivityDate": "05/12/2023", "TotalViews": 7284, "TotalDownloads": 1339, "TotalVotes": 37, "TotalKernels": 11}]
[{"Id": 12065933, "UserName": "oles04", "DisplayName": "Ole", "RegisterDate": "10/23/2022", "PerformanceTier": 2}]
import numpy as np import pandas as pd from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor df_players = pd.read_csv( "/kaggle/input/bundesliga-soccer-player/bundesliga_player.csv", index_col=[0] ) df_players.head() df_players.describe() df_players.info() # ### Variable Usefulness for Predicting Price # 1. Age: Age is likely to be a useful variable for predicting price as younger players generally have higher market values due to their potential for growth and longer career ahead. # 2. Height: Height might have some influence on the price as certain positions or playing styles may favor taller players. However, its impact on price may not be as significant compared to other variables. # 4. Club: Club affiliation is an important variable for predicting price. Players from high-profile clubs or clubs known for producing top talent are often valued more highly in the market. # 5. Position: Position is a crucial factor in determining price. Different positions have varying levels of demand and scarcity, leading to variations in market values. # 6. Contract Expiry Date: The remaining duration of a player's contract can impact their price. Players with longer contract terms may have higher values due to increased stability and reduced transfer urgency. # 7. Contract Start Date: The start date of a player's current contract may have less influence on predicting price compared to other variables. It is more indicative of the player's history with the club rather than their current market value. # 8. Agency/Representative: The player's agency or representative is not directly related to their market value. It is more of a logistical detail and does not provide significant insight into predicting price. # 9. Sponsorship Brand: The sponsorship brand associated with a player does not have a direct impact on their market value. While brand endorsements can increase a player's overall earnings, it may not be a significant factor in price prediction. # 10. Right/Left-Footed: A player's dominant foot is unlikely to have a substantial impact on their market value. It is more relevant to their playing style or preferred positions rather than predicting price. # 11. Max Price (Excluded): The "max price" variable should be excluded from the prediction model because it tracks the actual target variable we want to predict too closely. Including it as a feature would result in data leakage and lead to an overly optimistic evaluation of the model's performance. # Note: The above analysis is based on general assumptions and domain knowledge. It is recommended to validate the significance of these variables through statistical analysis and feature selection techniques specific to the dataset and prediction task at hand. 
# df_players = df_players[ [ "age", "height", "nationality", "foot", "position", "club", "contract_expires", "joined_club", "player_agent", "outfitter", "price", ] ] df_players["outfitter"] = df_players["outfitter"].replace(np.nan, "none") df_players["player_agent"] = df_players["player_agent"].replace(np.nan, "none") df_players.dropna(inplace=True) df_players.shape df_target = df_players[["price"]] df_features = df_players[ [ "age", "height", "foot", "position", "club", "contract_expires", "joined_club", "player_agent", "outfitter", ] ] for column in df_features.columns: unique_values = df_features[column].unique() print(f"Unique values in column '{column}': {unique_values}") columns_to_encode = [ "foot", "position", "club", "contract_expires", "joined_club", "player_agent", "outfitter", ] ct = ColumnTransformer( transformers=[("encoder", OneHotEncoder(), columns_to_encode)], remainder="passthrough", ) df_features_encoded = ct.fit_transform(df_features) df_features_encoded.shape x_train, x_test, y_train, y_test = train_test_split( df_features_encoded, df_target, test_size=0.3, random_state=0 ) rfr = RandomForestRegressor(n_estimators=100, random_state=1) rfr.fit(x_train, y_train) pred = rfr.predict(x_test) mse = mean_squared_error(y_test, pred) print(mse)
false
1
1,126
0
1,369
1,126
129320723
from tensorflow.keras.datasets import cifar10 from sklearn.model_selection import train_test_split from tensorflow.keras.utils import to_categorical import numpy as np # Load the data (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Convert the labels to one-hot encoded vectors num_classes = 10 y_train = to_categorical(y_train, num_classes) y_test = to_categorical(y_test, num_classes) # Center the data x_train = x_train.astype("float32") x_test = x_test.astype("float32") x_train_mean = np.mean(x_train, axis=0) x_train -= x_train_mean x_test -= x_train_mean # Normalize the data x_train_std = np.std(x_train, axis=0) x_train /= x_train_std x_test /= x_train_std x_train, x_val = x_train[:45000], x_train[45000:] y_train, y_val = y_train[:45000], y_train[45000:] from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization model = models.Sequential() model.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) model.add(BatchNormalization()) model.add(layers.Conv2D(128, (3, 3), activation="relu")) model.add(layers.Conv2D(128, (3, 3), activation="relu")) model.add(layers.Flatten()) model.add(layers.Dense(1024, activation="relu")) model.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss = [] test_acc = [] test_loss1, test_acc1 = model.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss1) test_acc.append(test_acc1) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization from keras.regularizers import l2 model2 = models.Sequential() model2.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model2.add(BatchNormalization()) model2.add(layers.Conv2D(128, (3, 3), activation="relu")) model2.add(layers.Conv2D(128, (3, 3), activation="relu")) model2.add(layers.Flatten()) model2.add(layers.Dense(1024, activation="relu", kernel_regularizer=l2(0.01))) model2.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model2.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model2.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model2.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss2, test_acc2 = model2.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss2) test_acc.append(test_acc2) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization from keras.regularizers import l1 model3 = models.Sequential() model3.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model3.add(BatchNormalization()) model3.add(layers.Conv2D(128, (3, 3), activation="relu")) model3.add(layers.Conv2D(128, (3, 3), activation="relu")) 
model3.add(layers.Flatten()) model3.add(layers.Dense(1024, activation="relu", kernel_regularizer=l1(0.001))) model3.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model3.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model3.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model3.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss3, test_acc3 = model3.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss3) test_acc.append(test_acc3) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization from keras.regularizers import l1_l2 model4 = models.Sequential() model4.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model4.add(BatchNormalization()) model4.add(layers.Conv2D(128, (3, 3), activation="relu")) model4.add(layers.Conv2D(128, (3, 3), activation="relu")) model4.add(layers.Flatten()) model4.add( layers.Dense(1024, activation="relu", kernel_regularizer=l1_l2(l1=0.01, l2=0.01)) ) model4.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model4.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model4.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model4.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss4, test_acc4 = model4.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss4) test_acc.append(test_acc4) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization from tensorflow.keras.constraints import max_norm model5 = models.Sequential() model5.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model5.add(BatchNormalization()) model5.add(layers.Conv2D(128, (3, 3), activation="relu")) model5.add(layers.Conv2D(128, (3, 3), activation="relu")) model5.add(layers.Flatten()) model5.add(layers.Dense(1024, activation="relu", kernel_constraint=max_norm(3.0))) model5.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model5.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model5.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model5.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss5, test_acc5 = model5.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss5) test_acc.append(test_acc5) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization model6 = models.Sequential() model6.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model6.add(layers.BatchNormalization()) model6.add(layers.Conv2D(128, (3, 3), 
activation="relu")) model6.add(layers.Conv2D(128, (3, 3), activation="relu")) model6.add(layers.Flatten()) model6.add(layers.Dense(1024, activation="relu")) model6.add(layers.Dropout(0.2)) model6.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model6.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model6.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model6.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss6, test_acc6 = model6.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss6) test_acc.append(test_acc6) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization from tensorflow.keras.constraints import max_norm model7 = models.Sequential() model7.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model7.add(layers.BatchNormalization()) model7.add(layers.Conv2D(128, (3, 3), activation="relu")) model7.add(layers.Conv2D(128, (3, 3), activation="relu")) model7.add(layers.Flatten()) model7.add(layers.Dense(1024, activation="relu", kernel_constraint=max_norm(3.0))) model7.add(layers.Dropout(0.2)) model7.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model7.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model7.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model7.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss7, test_acc7 = model7.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss7) test_acc.append(test_acc7) import matplotlib.pyplot as plt plt.axhline(test_acc[0], color="r") plt.axhline(test_acc[1], color="g") plt.axhline(test_acc[2], color="k") plt.axhline(test_acc[3], color="b") plt.axhline(test_acc[4], color="c") plt.axhline(test_acc[5], color="m") plt.axhline(test_acc[6], color="y") plt.legend( ["batch_norm", "l2", "l1", "l1+l2", "max_norm", "dropout", "max_norm+dropout"] ) plt.ylim(0.4, 0.7) plt.title("Test accuracy of the model") import matplotlib.pyplot as plt plt.axhline(test_loss[0], color="r") plt.axhline(test_loss[1], color="g") plt.axhline(test_loss[2], color="k") plt.axhline(test_loss[3], color="b") plt.axhline(test_loss[4], color="c") plt.axhline(test_loss[5], color="m") plt.axhline(test_loss[6], color="y") plt.legend( ["batch_norm", "l2", "l1", "l1+l2", "max_norm", "dropout", "max_norm+dropout"] ) # plt.ylim(0.4,0.7) plt.title("Test loss of the model")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/320/129320723.ipynb
null
null
[{"Id": 129320723, "ScriptId": 38434932, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6537991, "CreationDate": "05/12/2023 18:44:05", "VersionNumber": 1.0, "Title": "notebook4166786ea4", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 324.0, "LinesInsertedFromPrevious": 324.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
from tensorflow.keras.datasets import cifar10 from sklearn.model_selection import train_test_split from tensorflow.keras.utils import to_categorical import numpy as np # Load the data (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Convert the labels to one-hot encoded vectors num_classes = 10 y_train = to_categorical(y_train, num_classes) y_test = to_categorical(y_test, num_classes) # Center the data x_train = x_train.astype("float32") x_test = x_test.astype("float32") x_train_mean = np.mean(x_train, axis=0) x_train -= x_train_mean x_test -= x_train_mean # Normalize the data x_train_std = np.std(x_train, axis=0) x_train /= x_train_std x_test /= x_train_std x_train, x_val = x_train[:45000], x_train[45000:] y_train, y_val = y_train[:45000], y_train[45000:] from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization model = models.Sequential() model.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) model.add(BatchNormalization()) model.add(layers.Conv2D(128, (3, 3), activation="relu")) model.add(layers.Conv2D(128, (3, 3), activation="relu")) model.add(layers.Flatten()) model.add(layers.Dense(1024, activation="relu")) model.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss = [] test_acc = [] test_loss1, test_acc1 = model.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss1) test_acc.append(test_acc1) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization from keras.regularizers import l2 model2 = models.Sequential() model2.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model2.add(BatchNormalization()) model2.add(layers.Conv2D(128, (3, 3), activation="relu")) model2.add(layers.Conv2D(128, (3, 3), activation="relu")) model2.add(layers.Flatten()) model2.add(layers.Dense(1024, activation="relu", kernel_regularizer=l2(0.01))) model2.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model2.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model2.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model2.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss2, test_acc2 = model2.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss2) test_acc.append(test_acc2) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization from keras.regularizers import l1 model3 = models.Sequential() model3.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model3.add(BatchNormalization()) model3.add(layers.Conv2D(128, (3, 3), activation="relu")) model3.add(layers.Conv2D(128, (3, 3), activation="relu")) 
model3.add(layers.Flatten()) model3.add(layers.Dense(1024, activation="relu", kernel_regularizer=l1(0.001))) model3.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model3.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model3.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model3.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss3, test_acc3 = model3.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss3) test_acc.append(test_acc3) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization from keras.regularizers import l1_l2 model4 = models.Sequential() model4.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model4.add(BatchNormalization()) model4.add(layers.Conv2D(128, (3, 3), activation="relu")) model4.add(layers.Conv2D(128, (3, 3), activation="relu")) model4.add(layers.Flatten()) model4.add( layers.Dense(1024, activation="relu", kernel_regularizer=l1_l2(l1=0.01, l2=0.01)) ) model4.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model4.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model4.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model4.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss4, test_acc4 = model4.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss4) test_acc.append(test_acc4) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization from tensorflow.keras.constraints import max_norm model5 = models.Sequential() model5.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model5.add(BatchNormalization()) model5.add(layers.Conv2D(128, (3, 3), activation="relu")) model5.add(layers.Conv2D(128, (3, 3), activation="relu")) model5.add(layers.Flatten()) model5.add(layers.Dense(1024, activation="relu", kernel_constraint=max_norm(3.0))) model5.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model5.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model5.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model5.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss5, test_acc5 = model5.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss5) test_acc.append(test_acc5) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization model6 = models.Sequential() model6.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model6.add(layers.BatchNormalization()) model6.add(layers.Conv2D(128, (3, 3), 
activation="relu")) model6.add(layers.Conv2D(128, (3, 3), activation="relu")) model6.add(layers.Flatten()) model6.add(layers.Dense(1024, activation="relu")) model6.add(layers.Dropout(0.2)) model6.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model6.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model6.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model6.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss6, test_acc6 = model6.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss6) test_acc.append(test_acc6) from tensorflow.keras import layers, models from tensorflow.keras.layers import BatchNormalization from tensorflow.keras.constraints import max_norm model7 = models.Sequential() model7.add(layers.Conv2D(128, (3, 3), activation="relu", input_shape=(32, 32, 3))) # model7.add(layers.BatchNormalization()) model7.add(layers.Conv2D(128, (3, 3), activation="relu")) model7.add(layers.Conv2D(128, (3, 3), activation="relu")) model7.add(layers.Flatten()) model7.add(layers.Dense(1024, activation="relu", kernel_constraint=max_norm(3.0))) model7.add(layers.Dropout(0.2)) model7.add(layers.Dense(10, activation="softmax")) import tensorflow as tf model7.summary() decay = 1e-6 lr = 0.0001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( lr, decay_steps=10000, decay_rate=decay, staircase=True ) optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule) loss = "categorical_crossentropy" model7.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) batch_size = 128 epochs = 10 tf.keras.backend.clear_session() history = model7.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_val, y_val), ) test_loss7, test_acc7 = model7.evaluate(x_test, y_test, verbose=2) test_loss.append(test_loss7) test_acc.append(test_acc7) import matplotlib.pyplot as plt plt.axhline(test_acc[0], color="r") plt.axhline(test_acc[1], color="g") plt.axhline(test_acc[2], color="k") plt.axhline(test_acc[3], color="b") plt.axhline(test_acc[4], color="c") plt.axhline(test_acc[5], color="m") plt.axhline(test_acc[6], color="y") plt.legend( ["batch_norm", "l2", "l1", "l1+l2", "max_norm", "dropout", "max_norm+dropout"] ) plt.ylim(0.4, 0.7) plt.title("Test accuracy of the model") import matplotlib.pyplot as plt plt.axhline(test_loss[0], color="r") plt.axhline(test_loss[1], color="g") plt.axhline(test_loss[2], color="k") plt.axhline(test_loss[3], color="b") plt.axhline(test_loss[4], color="c") plt.axhline(test_loss[5], color="m") plt.axhline(test_loss[6], color="y") plt.legend( ["batch_norm", "l2", "l1", "l1+l2", "max_norm", "dropout", "max_norm+dropout"] ) # plt.ylim(0.4,0.7) plt.title("Test loss of the model")
false
0
3,808
0
3,808
3,808
129320238
<jupyter_start><jupyter_text>Aeroclub 2023 Kaggle dataset identifier: aeroclub-2023 <jupyter_script>import numpy as np import pandas as pd import os task_1_train = pd.read_excel("/kaggle/input/aeroclub-2023/1/Задача №1/train_data.xlsx") task_1_train.head()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/320/129320238.ipynb
aeroclub-2023
dimka11
[{"Id": 129320238, "ScriptId": 38449206, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2778887, "CreationDate": "05/12/2023 18:37:35", "VersionNumber": 1.0, "Title": "Aeroclub 2023 EDA Both tasks", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 9.0, "LinesInsertedFromPrevious": 9.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185257581, "KernelVersionId": 129320238, "SourceDatasetVersionId": 5671957}]
[{"Id": 5671957, "DatasetId": 3260672, "DatasourceVersionId": 5747475, "CreatorUserId": 2778887, "LicenseName": "Unknown", "CreationDate": "05/12/2023 18:18:42", "VersionNumber": 1.0, "Title": "Aeroclub 2023", "Slug": "aeroclub-2023", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3260672, "CreatorUserId": 2778887, "OwnerUserId": 2778887.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5671957.0, "CurrentDatasourceVersionId": 5747475.0, "ForumId": 3326228, "Type": 2, "CreationDate": "05/12/2023 18:18:42", "LastActivityDate": "05/12/2023", "TotalViews": 47, "TotalDownloads": 3, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 2778887, "UserName": "dimka11", "DisplayName": "Dmitry Sokolov", "RegisterDate": "02/04/2019", "PerformanceTier": 1}]
import numpy as np import pandas as pd import os task_1_train = pd.read_excel("/kaggle/input/aeroclub-2023/1/Задача №1/train_data.xlsx") task_1_train.head()
false
0
66
0
98
66
129320127
import numpy as np  # linear algebra import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import scipy.signal as signal import scipy.io.wavfile as wavfile import IPython.display as ipd # Load the audio file rate, audio = wavfile.read("/kaggle/input/cowheartbeat2/cowiwoutput2.wav") # Smooth the audio by applying a low-pass filter fc = 1000  # cutoff frequency in Hz b, a = signal.butter(4, 2 * fc / rate, "lowpass") audio_diff = signal.filtfilt(b, a, audio) # Apply dynamic range compression to increase the perceived volume threshold = 0.5  # threshold in proportion to the maximum amplitude ratio = 1.1  # compression ratio audio_comp = np.where( np.abs(audio_diff) > threshold * np.max(audio_diff), audio_diff / np.abs(audio_diff) * threshold + (1 - threshold) * np.abs(audio_diff) ** ratio, audio_diff, ) # Scale the audio to the maximum int16 amplitude audio_scaled = np.int16(audio_comp / np.max(np.abs(audio_comp)) * 32767) # Save the modified audio as a new file wavfile.write("/kaggle/working/amplified_audiolow1.wav", rate, audio_scaled) ipd.Audio("/kaggle/working/amplified_audiolow1.wav") import numpy as np from scipy.io import wavfile # Read in the filtered audio file rate, audio = wavfile.read("/kaggle/input/cowheartbeat2/cowiwoutput2.wav") # Scale the audio signal to increase its amplitude amplification_factor = 100.0 audio_scaled = audio * amplification_factor # Normalize the audio signal to ensure it's within the allowable range audio_normalized = audio_scaled / np.max(np.abs(audio_scaled)) # Write the amplified audio signal to a new audio file wavfile.write("/kaggle/working/amplified_audio_4.wav", rate, audio_normalized) # Play the new amplified audio file through a speaker using a media player or other audio playback software ipd.Audio("/kaggle/working/amplified_audio_4.wav") import numpy as np from scipy.io import wavfile # Read in the filtered audio file rate, audio = wavfile.read("/kaggle/input/cowheartbeat2/cowiwoutput2.wav") # Define the frequency ranges to keep frequencies_to_keep = np.array([22, 32, 100, 120]) # Compute the maximum absolute amplitude of the audio signal max_amplitude = np.max(np.abs(audio)) # Set a desired maximum amplitude for the output signal desired_max_amplitude = 0.2 * np.iinfo(audio.dtype).max # Compute the gain factor to normalize the signal gain_factor = desired_max_amplitude / max_amplitude # Apply the gain factor to the audio signal audio_normalized = audio * gain_factor # Write the normalized audio to a new file wavfile.write("/kaggle/working/normalized_audio_3.wav", rate, audio_normalized) # Play the normalized audio file through a media player or other audio playback software ipd.Audio("/kaggle/working/normalized_audio_3.wav")
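The last cell above defines frequencies_to_keep but never applies a filter with it. If the intent was to isolate those low-frequency heartbeat bands, one possible reading is the band-pass sketch below; the band edges are taken from the unused array, while the second-order-sections form (sosfiltfilt) and the output filename are my own choices.

import numpy as np
import scipy.signal as signal
import scipy.io.wavfile as wavfile

rate, audio = wavfile.read("/kaggle/input/cowheartbeat2/cowiwoutput2.wav")
audio = audio.astype(np.float64)
if audio.ndim > 1:  # collapse to mono if the file is multi-channel
    audio = audio.mean(axis=1)

# Band edges taken from the unused frequencies_to_keep array: 22-32 Hz and 100-120 Hz.
bands = [(22.0, 32.0), (100.0, 120.0)]

# Filter each band separately and sum the results, approximating a multi-band band-pass.
filtered = np.zeros_like(audio)
for low, high in bands:
    sos = signal.butter(4, [low, high], btype="bandpass", fs=rate, output="sos")
    filtered += signal.sosfiltfilt(sos, audio)

# Rescale to the int16 range and save.
filtered_int16 = np.int16(filtered / np.max(np.abs(filtered)) * 32767)
wavfile.write("/kaggle/working/bandpassed_audio.wav", rate, filtered_int16)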
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/320/129320127.ipynb
null
null
[{"Id": 129320127, "ScriptId": 38444035, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13202548, "CreationDate": "05/12/2023 18:35:53", "VersionNumber": 1.0, "Title": "notebook0a4b77d92c", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 93.0, "LinesInsertedFromPrevious": 93.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np  # linear algebra import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import scipy.signal as signal import scipy.io.wavfile as wavfile import IPython.display as ipd # Load the audio file rate, audio = wavfile.read("/kaggle/input/cowheartbeat2/cowiwoutput2.wav") # Smooth the audio by applying a low-pass filter fc = 1000  # cutoff frequency in Hz b, a = signal.butter(4, 2 * fc / rate, "lowpass") audio_diff = signal.filtfilt(b, a, audio) # Apply dynamic range compression to increase the perceived volume threshold = 0.5  # threshold in proportion to the maximum amplitude ratio = 1.1  # compression ratio audio_comp = np.where( np.abs(audio_diff) > threshold * np.max(audio_diff), audio_diff / np.abs(audio_diff) * threshold + (1 - threshold) * np.abs(audio_diff) ** ratio, audio_diff, ) # Scale the audio to the maximum int16 amplitude audio_scaled = np.int16(audio_comp / np.max(np.abs(audio_comp)) * 32767) # Save the modified audio as a new file wavfile.write("/kaggle/working/amplified_audiolow1.wav", rate, audio_scaled) ipd.Audio("/kaggle/working/amplified_audiolow1.wav") import numpy as np from scipy.io import wavfile # Read in the filtered audio file rate, audio = wavfile.read("/kaggle/input/cowheartbeat2/cowiwoutput2.wav") # Scale the audio signal to increase its amplitude amplification_factor = 100.0 audio_scaled = audio * amplification_factor # Normalize the audio signal to ensure it's within the allowable range audio_normalized = audio_scaled / np.max(np.abs(audio_scaled)) # Write the amplified audio signal to a new audio file wavfile.write("/kaggle/working/amplified_audio_4.wav", rate, audio_normalized) # Play the new amplified audio file through a speaker using a media player or other audio playback software ipd.Audio("/kaggle/working/amplified_audio_4.wav") import numpy as np from scipy.io import wavfile # Read in the filtered audio file rate, audio = wavfile.read("/kaggle/input/cowheartbeat2/cowiwoutput2.wav") # Define the frequency ranges to keep frequencies_to_keep = np.array([22, 32, 100, 120]) # Compute the maximum absolute amplitude of the audio signal max_amplitude = np.max(np.abs(audio)) # Set a desired maximum amplitude for the output signal desired_max_amplitude = 0.2 * np.iinfo(audio.dtype).max # Compute the gain factor to normalize the signal gain_factor = desired_max_amplitude / max_amplitude # Apply the gain factor to the audio signal audio_normalized = audio * gain_factor # Write the normalized audio to a new file wavfile.write("/kaggle/working/normalized_audio_3.wav", rate, audio_normalized) # Play the normalized audio file through a media player or other audio playback software ipd.Audio("/kaggle/working/normalized_audio_3.wav")
false
0
988
0
988
988
129789734
<jupyter_start><jupyter_text>New York Stock Exchange # Context This dataset is a playground for fundamental and technical analysis. It is said that 30% of traffic on stocks is already generated by machines, can trading be fully automated? If not, there is still a lot to learn from historical data. # Content Dataset consists of following files: - **prices.csv**: raw, as-is daily prices. Most of data spans from 2010 to the end 2016, for companies new on stock market date range is shorter. There have been approx. 140 stock splits in that time, this set doesn't account for that. - **prices-split-adjusted.csv**: same as prices, but there have been added adjustments for splits. - **securities.csv**: general description of each company with division on sectors - **fundamentals.csv**: metrics extracted from annual SEC 10K fillings (2012-2016), should be enough to derive most of popular fundamental indicators. # Acknowledgements Prices were fetched from Yahoo Finance, fundamentals are from Nasdaq Financials, extended by some fields from EDGAR SEC databases. # Inspiration Here is couple of things one could try out with this data: - One day ahead prediction: Rolling Linear Regression, ARIMA, Neural Networks, LSTM - Momentum/Mean-Reversion Strategies - Security clustering, portfolio construction/hedging Which company has biggest chance of being bankrupt? Which one is undervalued (how prices behaved afterwards), what is Return on Investment? Kaggle dataset identifier: nyse <jupyter_code>import pandas as pd df = pd.read_csv('nyse/prices-split-adjusted.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 851264 entries, 0 to 851263 Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 date 851264 non-null object 1 symbol 851264 non-null object 2 open 851264 non-null float64 3 close 851264 non-null float64 4 low 851264 non-null float64 5 high 851264 non-null float64 6 volume 851264 non-null float64 dtypes: float64(5), object(2) memory usage: 45.5+ MB <jupyter_text>Examples: { "date": "2016-01-05 00:00:00", "symbol": "WLTW", "open": 123.43, "close": 125.839996, "low": 122.309998, "high": 126.25, "volume": 2163600 } { "date": "2016-01-06 00:00:00", "symbol": "WLTW", "open": 125.239998, "close": 119.980003, "low": 119.940002, "high": 125.540001, "volume": 2386400 } { "date": "2016-01-07 00:00:00", "symbol": "WLTW", "open": 116.379997, "close": 114.949997, "low": 114.93, "high": 119.739998, "volume": 2489500 } { "date": "2016-01-08 00:00:00", "symbol": "WLTW", "open": 115.480003, "close": 116.620003, "low": 113.5, "high": 117.440002, "volume": 2006300 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import tensorflow.compat.v1 as tf import keras import matplotlib.pyplot as plt import math import time import warnings warnings.filterwarnings("ignore") plt.style.use("fivethirtyeight") from sklearn import metrics import numpy as np import pandas as pd import math import sklearn import sklearn.preprocessing import datetime import os import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.callbacks import EarlyStopping from keras.models import Sequential from keras.layers import Dense, Dropout, LSTM, GRU, Bidirectional from sklearn.metrics import mean_squared_error, r2_score from plotly import __version__ from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot import cufflinks as cf import plotly.offline as pyo cf.go_offline() pyo.init_notebook_mode() from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split df = pd.read_csv("../input/nyse/prices-split-adjusted.csv") plot_x = df["date"].copy() df.set_index("date", inplace=True) df.index = pd.to_datetime(df.index) df.head() df.duplicated().sum() df.drop_duplicates(inplace=True) df["symbol"].unique() print("\nnumber of different stocks: ", len(list(set(df.symbol)))) print(list(set(df.symbol))[:10]) # We have 501 unique stocks df.symbol.value_counts() # Let's plot for a random stock plt.figure(figsize=(15, 5)) plt.subplot(1, 2, 1) plt.plot(df[df.symbol == "GOOGL"].open.values, color="red", label="open") plt.plot(df[df.symbol == "GOOGL"].close.values, color="green", label="close") plt.plot(df[df.symbol == "GOOGL"].low.values, color="blue", label="low") plt.plot(df[df.symbol == "GOOGL"].high.values, color="black", label="high") plt.title("stock price") plt.xlabel("time [days]") plt.ylabel("price") plt.legend(loc="best") # plt.show() plt.subplot(1, 2, 2) plt.plot(df[df.symbol == "GOOGL"].volume.values, color="black", label="volume") plt.title("stock volume") plt.xlabel("time [days]") plt.ylabel("volume") plt.legend(loc="best") import sklearn.preprocessing import datetime import os import tensorflow as tf NOC_stock = df[df["symbol"] == "NOC"] x_scaler = MinMaxScaler() y_scaler = MinMaxScaler() NOC_df = NOC_stock.copy() NOC_df.drop(["symbol"], axis=1, inplace=True) x = NOC_df[["open", "low", "high", "volume"]].copy() y = NOC_df["close"].copy() x[["open", "low", "high", "volume"]] = x_scaler.fit_transform(x) y = y_scaler.fit_transform(y.values.reshape(-1, 1)) def load_data(X, seq_len, train_size=0.9): amount_of_features = X.shape[1] X_mat = X.values sequence_length = seq_len + 1 data = [] for index in range(len(X_mat) - sequence_length): data.append(X_mat[index : index + sequence_length]) data = np.array(data) train_split = int(round(train_size * data.shape[0])) train_data = data[:train_split, :] x_train = train_data[:, :-1] y_train = train_data[:, -1][:, -1] x_test = data[train_split:, :-1] y_test = data[train_split:, -1][:, -1] x_train = 
np.reshape( x_train, (x_train.shape[0], x_train.shape[1], amount_of_features) ) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], amount_of_features)) return x_train, y_train, x_test, y_test window = 22 x["close"] = y X_train, y_train, X_test, y_test = load_data(x, window) print(X_train.shape, y_train.shape, X_test.shape, y_test.shape) plt.figure(figsize=(15, 5)) plt.plot(NOC_stock.open.values, color="red", label="open") plt.plot(NOC_stock.close.values, color="green", label="close") plt.plot(NOC_stock.low.values, color="blue", label="low") plt.plot(NOC_stock.high.values, color="black", label="high") # plt.plot(df_stock_norm.volume.values, color='gray', label='volume') plt.title("stock") plt.xlabel("time [days]") plt.ylabel("price/volume") plt.legend(loc="best") plt.show() nmodel = Sequential() # First LSTM layer with Dropout regularisation nmodel.add(LSTM(units=50, input_shape=(22, 5), return_sequences=True)) nmodel.add(Dropout(0.2)) # Second LSTM layer nmodel.add(LSTM(units=50, return_sequences=True)) nmodel.add(Dropout(0.2)) # Third LSTM layer nmodel.add(LSTM(units=50, return_sequences=True)) nmodel.add(Dropout(0.2)) # Fourth LSTM layer nmodel.add(LSTM(units=50)) nmodel.add(Dropout(0.5)) # The output layer nmodel.add(Dense(units=50, kernel_initializer="uniform", activation="relu")) nmodel.add(Dense(units=1, kernel_initializer="uniform", activation="sigmoid")) earlystop = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=50) callbacks_list = [earlystop] # Compiling the RNN nmodel.compile(optimizer="adam", loss="mean_squared_error") # Fitting to the training set start = time.time() lstm_history = nmodel.fit( X_train, y_train, epochs=100, batch_size=35, validation_split=0.05, verbose=1, callbacks=callbacks_list, ) print("compilation time : ", time.time() - start) trainPredict = nmodel.predict(X_train) testPredict = nmodel.predict(X_test) trainPredict = y_scaler.inverse_transform(trainPredict) trainY = y_scaler.inverse_transform([y_train]) testPredict = y_scaler.inverse_transform(testPredict) testY = y_scaler.inverse_transform([y_test]) plot_predicted = testPredict.copy() plot_predicted = plot_predicted.reshape(174, 1) plot_actual = testY.copy() plot_actual = plot_actual.reshape(174, 1) print(plot_actual.shape) print(plot_predicted.shape) plt.figure(figsize=(20, 7)) plot_x = pd.to_datetime(plot_x.iloc[-174:]) plt.plot(pd.DataFrame(plot_predicted), label="Predicted") plt.plot(pd.DataFrame(plot_actual), label="Actual") plt.legend(loc="best") plt.show() trainScore = metrics.mean_squared_error(trainY[0], trainPredict[:, 0]) ** 0.5 print("Train Score: %.2f RMSE" % (trainScore)) testScore = metrics.mean_squared_error(testY[0], testPredict[:, 0]) ** 0.5 print("Test Score: %.2f RMSE" % (testScore))
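The shapes printed by load_data above follow from its sliding-window construction: each sample is seq_len consecutive rows of all five columns, and the target is the close value (the last column, since x["close"] = y was assigned last) of the row immediately after those seq_len rows. A toy illustration of the same indexing, not tied to the notebook's variables:

import numpy as np

seq_len = 3
toy = np.arange(20, dtype=float).reshape(10, 2)  # 10 timesteps, 2 features

# Same construction as load_data: collect windows of length seq_len + 1.
windows = np.array([toy[i : i + seq_len + 1] for i in range(len(toy) - (seq_len + 1))])
x = windows[:, :-1]     # first seq_len rows of each window -> model input
y = windows[:, -1, -1]  # last column of the row after the window -> target

print(x.shape, y.shape)  # (6, 3, 2) and (6,)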
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/789/129789734.ipynb
nyse
dgawlik
[{"Id": 129789734, "ScriptId": 38597736, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7916356, "CreationDate": "05/16/2023 13:31:20", "VersionNumber": 2.0, "Title": "Stock Price Using LSTM and GRU", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 207.0, "LinesInsertedFromPrevious": 12.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 195.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186156617, "KernelVersionId": 129789734, "SourceDatasetVersionId": 1575}]
[{"Id": 1575, "DatasetId": 854, "DatasourceVersionId": 1575, "CreatorUserId": 839728, "LicenseName": "CC0: Public Domain", "CreationDate": "02/22/2017 10:18:25", "VersionNumber": 3.0, "Title": "New York Stock Exchange", "Slug": "nyse", "Subtitle": "S&P 500 companies historical prices with fundamental data", "Description": "# Context \n\nThis dataset is a playground for fundamental and technical analysis. It is said that 30% of traffic on stocks is already generated by machines, can trading be fully automated? If not, there is still a lot to learn from historical data. \n\n# Content\n\nDataset consists of following files:\n\n - **prices.csv**: raw, as-is daily prices. Most of data spans from 2010 to the end 2016, for companies new on stock market date range is shorter. There have been approx. 140 stock splits in that time, this set doesn't account for that.\n - **prices-split-adjusted.csv**: same as prices, but there have been added adjustments for splits.\n - **securities.csv**: general description of each company with division on sectors\n - **fundamentals.csv**: metrics extracted from annual SEC 10K fillings (2012-2016), should be enough to derive most of popular fundamental indicators.\n\n# Acknowledgements\n\nPrices were fetched from Yahoo Finance, fundamentals are from Nasdaq Financials, extended by some fields from EDGAR SEC databases.\n\n# Inspiration\n\nHere is couple of things one could try out with this data:\n\n - One day ahead prediction: Rolling Linear Regression, ARIMA, Neural Networks, LSTM\n - Momentum/Mean-Reversion Strategies\n - Security clustering, portfolio construction/hedging\n\nWhich company has biggest chance of being bankrupt? Which one is undervalued (how prices behaved afterwards), what is Return on Investment?", "VersionNotes": "Bug in adjusted splits", "TotalCompressedBytes": 105844882.0, "TotalUncompressedBytes": 105844882.0}]
[{"Id": 854, "CreatorUserId": 839728, "OwnerUserId": 839728.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1575.0, "CurrentDatasourceVersionId": 1575.0, "ForumId": 2661, "Type": 2, "CreationDate": "02/19/2017 12:21:52", "LastActivityDate": "02/06/2018", "TotalViews": 468020, "TotalDownloads": 80140, "TotalVotes": 1305, "TotalKernels": 409}]
[{"Id": 839728, "UserName": "dgawlik", "DisplayName": "Dominik Gawlik", "RegisterDate": "12/17/2016", "PerformanceTier": 1}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import tensorflow.compat.v1 as tf import keras import matplotlib.pyplot as plt import math import time import warnings warnings.filterwarnings("ignore") plt.style.use("fivethirtyeight") from sklearn import metrics import numpy as np import pandas as pd import math import sklearn import sklearn.preprocessing import datetime import os import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.callbacks import EarlyStopping from keras.models import Sequential from keras.layers import Dense, Dropout, LSTM, GRU, Bidirectional from sklearn.metrics import mean_squared_error, r2_score from plotly import __version__ from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot import cufflinks as cf import plotly.offline as pyo cf.go_offline() pyo.init_notebook_mode() from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split df = pd.read_csv("../input/nyse/prices-split-adjusted.csv") plot_x = df["date"].copy() df.set_index("date", inplace=True) df.index = pd.to_datetime(df.index) df.head() df.duplicated().sum() df.drop_duplicates(inplace=True) df["symbol"].unique() print("\nnumber of different stocks: ", len(list(set(df.symbol)))) print(list(set(df.symbol))[:10]) # We have 501 unique stocks df.symbol.value_counts() # Let's plot for a random stock plt.figure(figsize=(15, 5)) plt.subplot(1, 2, 1) plt.plot(df[df.symbol == "GOOGL"].open.values, color="red", label="open") plt.plot(df[df.symbol == "GOOGL"].close.values, color="green", label="close") plt.plot(df[df.symbol == "GOOGL"].low.values, color="blue", label="low") plt.plot(df[df.symbol == "GOOGL"].high.values, color="black", label="high") plt.title("stock price") plt.xlabel("time [days]") plt.ylabel("price") plt.legend(loc="best") # plt.show() plt.subplot(1, 2, 2) plt.plot(df[df.symbol == "GOOGL"].volume.values, color="black", label="volume") plt.title("stock volume") plt.xlabel("time [days]") plt.ylabel("volume") plt.legend(loc="best") import sklearn.preprocessing import datetime import os import tensorflow as tf NOC_stock = df[df["symbol"] == "NOC"] x_scaler = MinMaxScaler() y_scaler = MinMaxScaler() NOC_df = NOC_stock.copy() NOC_df.drop(["symbol"], axis=1, inplace=True) x = NOC_df[["open", "low", "high", "volume"]].copy() y = NOC_df["close"].copy() x[["open", "low", "high", "volume"]] = x_scaler.fit_transform(x) y = y_scaler.fit_transform(y.values.reshape(-1, 1)) def load_data(X, seq_len, train_size=0.9): amount_of_features = X.shape[1] X_mat = X.values sequence_length = seq_len + 1 data = [] for index in range(len(X_mat) - sequence_length): data.append(X_mat[index : index + sequence_length]) data = np.array(data) train_split = int(round(train_size * data.shape[0])) train_data = data[:train_split, :] x_train = train_data[:, :-1] y_train = train_data[:, 
-1][:, -1] x_test = data[train_split:, :-1] y_test = data[train_split:, -1][:, -1] x_train = np.reshape( x_train, (x_train.shape[0], x_train.shape[1], amount_of_features) ) x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], amount_of_features)) return x_train, y_train, x_test, y_test window = 22 x["close"] = y X_train, y_train, X_test, y_test = load_data(x, window) print(X_train.shape, y_train.shape, X_test.shape, y_test.shape) plt.figure(figsize=(15, 5)) plt.plot(NOC_stock.open.values, color="red", label="open") plt.plot(NOC_stock.close.values, color="green", label="close") plt.plot(NOC_stock.low.values, color="blue", label="low") plt.plot(NOC_stock.high.values, color="black", label="high") # plt.plot(df_stock_norm.volume.values, color='gray', label='volume') plt.title("stock") plt.xlabel("time [days]") plt.ylabel("price/volume") plt.legend(loc="best") plt.show() nmodel = Sequential() # First LSTM layer with Dropout regularisation nmodel.add(LSTM(units=50, input_shape=(22, 5), return_sequences=True)) nmodel.add(Dropout(0.2)) # Second LSTM layer nmodel.add(LSTM(units=50, return_sequences=True)) nmodel.add(Dropout(0.2)) # Third LSTM layer nmodel.add(LSTM(units=50, return_sequences=True)) nmodel.add(Dropout(0.2)) # Fourth LSTM layer nmodel.add(LSTM(units=50)) nmodel.add(Dropout(0.5)) # The output layer nmodel.add(Dense(units=50, kernel_initializer="uniform", activation="relu")) nmodel.add(Dense(units=1, kernel_initializer="uniform", activation="sigmoid")) earlystop = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=50) callbacks_list = [earlystop] # Compiling the RNN nmodel.compile(optimizer="adam", loss="mean_squared_error") # Fitting to the training set start = time.time() lstm_history = nmodel.fit( X_train, y_train, epochs=100, batch_size=35, validation_split=0.05, verbose=1, callbacks=callbacks_list, ) print("compilation time : ", time.time() - start) trainPredict = nmodel.predict(X_train) testPredict = nmodel.predict(X_test) trainPredict = y_scaler.inverse_transform(trainPredict) trainY = y_scaler.inverse_transform([y_train]) testPredict = y_scaler.inverse_transform(testPredict) testY = y_scaler.inverse_transform([y_test]) plot_predicted = testPredict.copy() plot_predicted = plot_predicted.reshape(174, 1) plot_actual = testY.copy() plot_actual = plot_actual.reshape(174, 1) print(plot_actual.shape) print(plot_predicted.shape) plt.figure(figsize=(20, 7)) plot_x = pd.to_datetime(plot_x.iloc[-174:]) plt.plot(pd.DataFrame(plot_predicted), label="Predicted") plt.plot(pd.DataFrame(plot_actual), label="Actual") plt.legend(loc="best") plt.show() trainScore = metrics.mean_squared_error(trainY[0], trainPredict[:, 0]) ** 0.5 print("Train Score: %.2f RMSE" % (trainScore)) testScore = metrics.mean_squared_error(testY[0], testPredict[:, 0]) ** 0.5 print("Test Score: %.2f RMSE" % (testScore))
[{"nyse/prices-split-adjusted.csv": {"column_names": "[\"date\", \"symbol\", \"open\", \"close\", \"low\", \"high\", \"volume\"]", "column_data_types": "{\"date\": \"object\", \"symbol\": \"object\", \"open\": \"float64\", \"close\": \"float64\", \"low\": \"float64\", \"high\": \"float64\", \"volume\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 851264 entries, 0 to 851263\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 date 851264 non-null object \n 1 symbol 851264 non-null object \n 2 open 851264 non-null float64\n 3 close 851264 non-null float64\n 4 low 851264 non-null float64\n 5 high 851264 non-null float64\n 6 volume 851264 non-null float64\ndtypes: float64(5), object(2)\nmemory usage: 45.5+ MB\n", "summary": "{\"open\": {\"count\": 851264.0, \"mean\": 64.99361772892367, \"std\": 75.20389291554726, \"min\": 1.66, \"25%\": 31.27, \"50%\": 48.459999, \"75%\": 75.120003, \"max\": 1584.439941}, \"close\": {\"count\": 851264.0, \"mean\": 65.01191274222055, \"std\": 75.20121600708907, \"min\": 1.59, \"25%\": 31.2927756654, \"50%\": 48.48, \"75%\": 75.139999, \"max\": 1578.130005}, \"low\": {\"count\": 851264.0, \"mean\": 64.33654092484302, \"std\": 74.45951807995792, \"min\": 1.5, \"25%\": 30.940001, \"50%\": 47.970001, \"75%\": 74.400002, \"max\": 1549.939941}, \"high\": {\"count\": 851264.0, \"mean\": 65.63974760649691, \"std\": 75.90686098144873, \"min\": 1.81, \"25%\": 31.620001, \"50%\": 48.959999, \"75%\": 75.849998, \"max\": 1600.930054}, \"volume\": {\"count\": 851264.0, \"mean\": 5415112.640027066, \"std\": 12494681.433080778, \"min\": 0.0, \"25%\": 1221500.0, \"50%\": 2476250.0, \"75%\": 5222500.0, \"max\": 859643400.0}}", "examples": "{\"date\":{\"0\":\"2016-01-05\",\"1\":\"2016-01-06\",\"2\":\"2016-01-07\",\"3\":\"2016-01-08\"},\"symbol\":{\"0\":\"WLTW\",\"1\":\"WLTW\",\"2\":\"WLTW\",\"3\":\"WLTW\"},\"open\":{\"0\":123.43,\"1\":125.239998,\"2\":116.379997,\"3\":115.480003},\"close\":{\"0\":125.839996,\"1\":119.980003,\"2\":114.949997,\"3\":116.620003},\"low\":{\"0\":122.309998,\"1\":119.940002,\"2\":114.93,\"3\":113.5},\"high\":{\"0\":126.25,\"1\":125.540001,\"2\":119.739998,\"3\":117.440002},\"volume\":{\"0\":2163600.0,\"1\":2386400.0,\"2\":2489500.0,\"3\":2006300.0}}"}}]
true
1
<start_data_description><data_path>nyse/prices-split-adjusted.csv: <column_names> ['date', 'symbol', 'open', 'close', 'low', 'high', 'volume'] <column_types> {'date': 'object', 'symbol': 'object', 'open': 'float64', 'close': 'float64', 'low': 'float64', 'high': 'float64', 'volume': 'float64'} <dataframe_Summary> {'open': {'count': 851264.0, 'mean': 64.99361772892367, 'std': 75.20389291554726, 'min': 1.66, '25%': 31.27, '50%': 48.459999, '75%': 75.120003, 'max': 1584.439941}, 'close': {'count': 851264.0, 'mean': 65.01191274222055, 'std': 75.20121600708907, 'min': 1.59, '25%': 31.2927756654, '50%': 48.48, '75%': 75.139999, 'max': 1578.130005}, 'low': {'count': 851264.0, 'mean': 64.33654092484302, 'std': 74.45951807995792, 'min': 1.5, '25%': 30.940001, '50%': 47.970001, '75%': 74.400002, 'max': 1549.939941}, 'high': {'count': 851264.0, 'mean': 65.63974760649691, 'std': 75.90686098144873, 'min': 1.81, '25%': 31.620001, '50%': 48.959999, '75%': 75.849998, 'max': 1600.930054}, 'volume': {'count': 851264.0, 'mean': 5415112.640027066, 'std': 12494681.433080778, 'min': 0.0, '25%': 1221500.0, '50%': 2476250.0, '75%': 5222500.0, 'max': 859643400.0}} <dataframe_info> RangeIndex: 851264 entries, 0 to 851263 Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 date 851264 non-null object 1 symbol 851264 non-null object 2 open 851264 non-null float64 3 close 851264 non-null float64 4 low 851264 non-null float64 5 high 851264 non-null float64 6 volume 851264 non-null float64 dtypes: float64(5), object(2) memory usage: 45.5+ MB <some_examples> {'date': {'0': '2016-01-05', '1': '2016-01-06', '2': '2016-01-07', '3': '2016-01-08'}, 'symbol': {'0': 'WLTW', '1': 'WLTW', '2': 'WLTW', '3': 'WLTW'}, 'open': {'0': 123.43, '1': 125.239998, '2': 116.379997, '3': 115.480003}, 'close': {'0': 125.839996, '1': 119.980003, '2': 114.949997, '3': 116.620003}, 'low': {'0': 122.309998, '1': 119.940002, '2': 114.93, '3': 113.5}, 'high': {'0': 126.25, '1': 125.540001, '2': 119.739998, '3': 117.440002}, 'volume': {'0': 2163600.0, '1': 2386400.0, '2': 2489500.0, '3': 2006300.0}} <end_description>
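# A small follow-up sketch (my addition; the CSV path is the same one read earlier in the notebook) showing how the dtype listing and the per-column summary reproduced in this description block can be regenerated directly with pandas.
import pandas as pd

prices = pd.read_csv("../input/nyse/prices-split-adjusted.csv")
prices.info()                      # column dtypes and non-null counts, as listed above
print(prices.describe())           # count/mean/std/min/quartiles/max for the numeric columns
print(prices["symbol"].nunique())  # number of distinct tickers in the file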
2,186
0
3,247
2,186
129767631
from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten from keras.optimizers import RMSprop (mnist_train_images, mnist_train_labels), ( mnist_test_images, mnist_test_labels, ) = mnist.load_data() from keras import backend as K if K.image_data_format() == "channels_first": train_images = mnist_train_images.reshape(mnist_train_images.shape[0], 1, 28, 28) test_images = mnist_test_images.reshape(mnist_test_images.shape[0], 1, 28, 28) input_shape = (1, 28, 28) else: train_images = mnist_train_images.reshape(mnist_train_images.shape[0], 28, 28, 1) test_images = mnist_test_images.reshape(mnist_test_images.shape[0], 28, 28, 1) input_shape = (28, 28, 1) train_images = train_images.astype("float32") test_images = test_images.astype("float32") train_images /= 255 test_images /= 255 import keras train_labels = keras.utils.to_categorical(mnist_train_labels) test_labels = keras.utils.to_categorical(mnist_test_labels) import matplotlib.pyplot as plt def sample(num): print(train_labels[num]) label = train_labels[num].argmax(axis=0) image = train_images[num].reshape([28, 28]) plt.title("Sample: %d Label: %d" % (num, label)) plt.imshow(image, cmap=plt.get_cmap("gray_r")) plt.show() sample(7777) model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=input_shape)) model.add(Conv2D(64, (3, 3), activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation="softmax")) model.summary() model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) history = model.fit( train_images, train_labels, batch_size=32, epochs=3, verbose=2, validation_data=(test_images, test_labels), ) score = model.evaluate(test_images, test_labels, verbose=0) print("Test loss:", score[0]) print("Test accuracy:", score[1])
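# The fit call above keeps its return value in `history` but never inspects it. A hedged follow-up sketch (my addition, reusing the `history` variable from the cells above): with metrics=["accuracy"] and validation_data supplied, Keras records these four series in history.history, and plotting them is a quick overfitting check.
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history["loss"], label="train loss")
plt.plot(history.history["val_loss"], label="val loss")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history["accuracy"], label="train acc")
plt.plot(history.history["val_accuracy"], label="val acc")
plt.legend()
plt.tight_layout()
plt.show()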
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/767/129767631.ipynb
null
null
[{"Id": 129767631, "ScriptId": 38593072, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13408885, "CreationDate": "05/16/2023 10:28:33", "VersionNumber": 1.0, "Title": "notebook6ff578d370", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 65.0, "LinesInsertedFromPrevious": 65.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten from keras.optimizers import RMSprop (mnist_train_images, mnist_train_labels), ( mnist_test_images, mnist_test_labels, ) = mnist.load_data() from keras import backend as K if K.image_data_format() == "channels_first": train_images = mnist_train_images.reshape(mnist_train_images.shape[0], 1, 28, 28) test_images = mnist_test_images.reshape(mnist_test_images.shape[0], 1, 28, 28) input_shape = (1, 28, 28) else: train_images = mnist_train_images.reshape(mnist_train_images.shape[0], 28, 28, 1) test_images = mnist_test_images.reshape(mnist_test_images.shape[0], 28, 28, 1) input_shape = (28, 28, 1) train_images = train_images.astype("float32") test_images = test_images.astype("float32") train_images /= 255 test_images /= 255 import keras train_labels = keras.utils.to_categorical(mnist_train_labels) test_labels = keras.utils.to_categorical(mnist_test_labels) import matplotlib.pyplot as plt def sample(num): print(train_labels[num]) label = train_labels[num].argmax(axis=0) image = train_images[num].reshape([28, 28]) plt.title("Sample: %d Label: %d" % (num, label)) plt.imshow(image, cmap=plt.get_cmap("gray_r")) plt.show() sample(7777) model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=input_shape)) model.add(Conv2D(64, (3, 3), activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation="softmax")) model.summary() model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) history = model.fit( train_images, train_labels, batch_size=32, epochs=3, verbose=2, validation_data=(test_images, test_labels), ) score = model.evaluate(test_images, test_labels, verbose=0) print("Test loss:", score[0]) print("Test accuracy:", score[1])
false
0
741
0
741
741
129767729
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # **Imports** import xgboost as xgb import numpy as np import pandas as pd import os from xgboost import XGBRegressor # **Loading the data** train_data = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/train.csv") test_data = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/test.csv") oil_data = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/oil.csv") # **Taking a sample** train_data = train_data.sample(frac=0.01) # **Making new column for month** train_data["date"] = pd.to_datetime(train_data["date"]) train_data["Month"] = train_data["date"].dt.month # **Making new categorical column for number of store** train_data = pd.concat( [train_data, pd.get_dummies(train_data["store_nbr"], prefix="store_nbr")], axis=1 ) # **Making new column for day of the week** # *Should we turn it into a categorical feature? if we do so we are ignoring the connection between consecutive days, int type will also ignore the closeness between the first day of the week and the last one.... train_data["day"] = train_data["date"].dt.day_name() train_data["day"] = train_data["day"].astype("category") # ***Making new column for the sales in the previous day** # create a new column for the sale of the same product in the day before train_data["yesterday"] = train_data.groupby(["store_nbr", "family"])["sales"].shift(1) def fill_median(group): return group.fillna(group.median()) # Apply the fill_median function to each group # print the first 10 rows of the updated dataset median = train_data.groupby(["store_nbr", "family"])["sales"].transform(fill_median) train_data["yesterday"].fillna(median, inplace=True) train_data.describe() # **family -> categorical** train_data = pd.concat( [train_data, pd.get_dummies(train_data["family"], prefix="family")], axis=1 ) # **Converting day to categorical** train_data = pd.concat( [train_data, pd.get_dummies(train_data["day"], prefix="day")], axis=1 ) # Drop the original 'Feature1' column as it is no longer needed train_data = train_data.drop(columns=["day"]) # **train/test split** SPLIT_DATE = "2017-01-01" train = train_data[train_data["date"] <= SPLIT_DATE] test = train_data[train_data["date"] > SPLIT_DATE] # Columns to drop : 'id', 'date', 'store_nbr', 'family', 'sales' x_train = train.drop(["id", "date", "store_nbr", "family", "sales"], axis=1) y_train = train["sales"] x_test = test.drop(["id", "date", "store_nbr", "family", "sales"], axis=1) y_test = test["sales"] # **Model** my_model = XGBRegressor(n_estimators=1000, learning_rate=0.05, n_jobs=4) my_model.fit(x_train, y_train, verbose=False) x_train.info() # **Evaluation** # MSE # from sklearn.metrics import mean_squared_error # mse = mean_squared_error(y_test, y_pred) # print("Mean Squared Error: %.2f" % mse) # RMSLE y_pred len(y_test), len(y_pred) y_test.dropna() y_test = np.where(y_test <= 0, 1, y_test) y_pred 
= my_model.predict(x_test) y_pred = np.where(y_pred <= 0, 1, y_pred) import numpy as np from sklearn.metrics import mean_squared_error def rmsle(y_true, y_pred): return np.sqrt(mean_squared_error(np.log1p(y_true), np.log1p(y_pred))) y_eval = rmsle(y_test, y_pred) print(y_eval) # **prediction** print(y_pred)
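# A tiny synthetic check (my addition, not part of the notebook) of the groupby().shift(1) lag used to build the "yesterday" column above: the shift is applied inside each (store_nbr, family) group, so the first row of every group becomes NaN instead of leaking the previous group's last sale.
import pandas as pd

demo = pd.DataFrame(
    {
        "store_nbr": [1, 1, 1, 2, 2, 2],
        "family": ["BREAD"] * 6,
        "sales": [10.0, 12.0, 9.0, 100.0, 110.0, 95.0],
    }
)
demo["yesterday"] = demo.groupby(["store_nbr", "family"])["sales"].shift(1)
print(demo)  # store 2's first row is NaN; it does not inherit store 1's last value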
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/767/129767729.ipynb
null
null
[{"Id": 129767729, "ScriptId": 38113536, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11887966, "CreationDate": "05/16/2023 10:29:21", "VersionNumber": 2.0, "Title": "Fork of Store Sales 15/5/23", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 141.0, "LinesInsertedFromPrevious": 56.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 85.0, "LinesInsertedFromFork": 56.0, "LinesDeletedFromFork": 2.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 85.0, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # **Imports** import xgboost as xgb import numpy as np import pandas as pd import os from xgboost import XGBRegressor # **Loading the data** train_data = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/train.csv") test_data = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/test.csv") oil_data = pd.read_csv("/kaggle/input/store-sales-time-series-forecasting/oil.csv") # **Taking a sample** train_data = train_data.sample(frac=0.01) # **Making new column for month** train_data["date"] = pd.to_datetime(train_data["date"]) train_data["Month"] = train_data["date"].dt.month # **Making new categorical column for number of store** train_data = pd.concat( [train_data, pd.get_dummies(train_data["store_nbr"], prefix="store_nbr")], axis=1 ) # **Making new column for day of the week** # *Should we turn it into a categorical feature? if we do so we are ignoring the connection between consecutive days, int type will also ignore the closeness between the first day of the week and the last one.... train_data["day"] = train_data["date"].dt.day_name() train_data["day"] = train_data["day"].astype("category") # ***Making new column for the sales in the previous day** # create a new column for the sale of the same product in the day before train_data["yesterday"] = train_data.groupby(["store_nbr", "family"])["sales"].shift(1) def fill_median(group): return group.fillna(group.median()) # Apply the fill_median function to each group # print the first 10 rows of the updated dataset median = train_data.groupby(["store_nbr", "family"])["sales"].transform(fill_median) train_data["yesterday"].fillna(median, inplace=True) train_data.describe() # **family -> categorical** train_data = pd.concat( [train_data, pd.get_dummies(train_data["family"], prefix="family")], axis=1 ) # **Converting day to categorical** train_data = pd.concat( [train_data, pd.get_dummies(train_data["day"], prefix="day")], axis=1 ) # Drop the original 'Feature1' column as it is no longer needed train_data = train_data.drop(columns=["day"]) # **train/test split** SPLIT_DATE = "2017-01-01" train = train_data[train_data["date"] <= SPLIT_DATE] test = train_data[train_data["date"] > SPLIT_DATE] # Columns to drop : 'id', 'date', 'store_nbr', 'family', 'sales' x_train = train.drop(["id", "date", "store_nbr", "family", "sales"], axis=1) y_train = train["sales"] x_test = test.drop(["id", "date", "store_nbr", "family", "sales"], axis=1) y_test = test["sales"] # **Model** my_model = XGBRegressor(n_estimators=1000, learning_rate=0.05, n_jobs=4) my_model.fit(x_train, y_train, verbose=False) x_train.info() # **Evaluation** # MSE # from sklearn.metrics import mean_squared_error # mse = mean_squared_error(y_test, y_pred) # print("Mean Squared Error: %.2f" % mse) # RMSLE y_pred len(y_test), len(y_pred) y_test.dropna() y_test = np.where(y_test <= 0, 1, y_test) y_pred 
= my_model.predict(x_test) y_pred = np.where(y_pred <= 0, 1, y_pred) import numpy as np from sklearn.metrics import mean_squared_error def rmsle(y_true, y_pred): return np.sqrt(mean_squared_error(np.log1p(y_true), np.log1p(y_pred))) y_eval = rmsle(y_test, y_pred) print(y_eval) # **prediction** print(y_pred)
false
0
1,307
0
1,307
1,307
129767337
<jupyter_start><jupyter_text>Diabetes prediction dataset The **Diabetes prediction dataset** is a collection of medical and demographic data from patients, along with their diabetes status (positive or negative). The data includes features such as age, gender, body mass index (BMI), hypertension, heart disease, smoking history, HbA1c level, and blood glucose level. This dataset can be used to build machine learning models to predict diabetes in patients based on their medical history and demographic information. This can be useful for healthcare professionals in identifying patients who may be at risk of developing diabetes and in developing personalized treatment plans. Additionally, the dataset can be used by researchers to explore the relationships between various medical and demographic factors and the likelihood of developing diabetes. Kaggle dataset identifier: diabetes-prediction-dataset <jupyter_script>import pandas as pd import numpy as np import matplotlib.pyplot as plt df = pd.read_csv( "/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv" ) df.head() df.drop(columns=["gender", "age"], inplace=True) df.head() df["bmi"] = df.bmi.transform(lambda x: ((x - x.min()) / (x.max() - x.min()))) df.head() df["HbA1c_level"] = pd.cut( df["HbA1c_level"], bins=3, labels=["Optimal", "Elevated", "High"] ) df.head() df["blood_glucose_level"] = pd.cut( df["blood_glucose_level"], bins=3, labels=["Normal", "Prediabetes", "Diabetes"] ) df.head() X = df.drop(columns="diabetes") y = df.diabetes X = pd.get_dummies(X, columns=["smoking_history", "HbA1c_level", "blood_glucose_level"]) X.head() from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score rfc = RandomForestClassifier(n_estimators=10) scores = cross_val_score(rfc, X, y, cv=5) scores.mean() # # Splitting Data from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler X = df.drop(columns="diabetes") y = df.diabetes X = pd.get_dummies(X, columns=["smoking_history", "HbA1c_level", "blood_glucose_level"]) X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=0.2, random_state=48, stratify=y ) X_train.shape, X_test.shape, y_train.shape, y_test.shape from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score rfc = RandomForestClassifier(n_estimators=10) rfc.fit(X_train, y_train) rfc.score(X_test, y_test), rfc.score(X_train, y_train) # # Use MinMaxScaler for test Score! 
scaler = MinMaxScaler() scaler.fit(X_train) X_train_scaled = scaler.transform(X_train) X_test_scaled = scaler.transform(X_test) rfc.fit(X_train_scaled, y_train) rfc.score(X_test_scaled, y_test), rfc.score(X_train_scaled, y_train) # # Improvement data parameters n_estimators est = range(1, 51, 2) test_score = [] train_score = [] for e in est: rfc = RandomForestClassifier(n_estimators=e) rfc.fit(X_train, y_train) test_score.append(rfc.score(X_test, y_test)) train_score.append(rfc.score(X_train, y_train)) print(f"Max Score : {np.max(test_score)}") print(f"n_estimators : {est[np.argmax(test_score)]}") # # With Scaling MinMaxScaler # est = range(1, 51, 2) test_score = [] train_score = [] for e in est: rfc = RandomForestClassifier(n_estimators=e) rfc.fit(X_train_scaled, y_train) test_score.append(rfc.score(X_test_scaled, y_test)) train_score.append(rfc.score(X_train_scaled, y_train)) print(f"Max Score : {np.max(test_score)}") print(f"n_estimators : {est[np.argmax(test_score)]}") # # Pipeline from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import MinMaxScaler, OneHotEncoder from sklearn.compose import ColumnTransformer df.head() X = df.drop(columns=["diabetes"]) y = df.diabetes X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, stratify=y, random_state=42 ) X_train.shape, X_test.shape, y_train.shape, y_test.shape X_train.head() numerical_pipeline = Pipeline( [("imputer", SimpleImputer(strategy="median")), ("scale", MinMaxScaler())] ) categorical_pipeline = Pipeline( [("imputer", SimpleImputer(strategy="most_frequent")), ("onehot", OneHotEncoder())] ) X_train.head() preprocessor = ColumnTransformer( [ ("numeric", numerical_pipeline, ["hypertension", "heart_disease", "bmi"]), ( "categoric", categorical_pipeline, ["smoking_history", "HbA1c_level", "blood_glucose_level"], ), ] ) pipeline = Pipeline([("prep", preprocessor), ("algo", RandomForestClassifier())]) pipeline.fit(X_train, y_train) pipeline.score(X_test, y_test), pipeline.score(X_train, y_train) # # GridSearch CV from sklearn.model_selection import GridSearchCV parameters = { "algo__n_estimators": range(1, 71, 2), "algo__criterion": ["gini", "entropy", "log_loss"], } model = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1) model.fit(X_train, y_train) pd.DataFrame(model.cv_results_).sort_values("rank_test_score").head() model.best_params_ model.score(X_train, y_train), model.score(X_test, y_test)
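# A short sketch (my addition) that makes the bins=3 step above concrete: pd.cut with retbins=True also returns the equal-width edges it chose, which is a quick way to see what "Optimal", "Elevated" and "High" actually mean on a column. The toy HbA1c values are illustrative only.
import pandas as pd

hba1c = pd.Series([3.5, 4.0, 5.0, 5.7, 6.1, 6.6, 7.5, 8.8, 9.0])
binned, edges = pd.cut(hba1c, bins=3, labels=["Optimal", "Elevated", "High"], retbins=True)
print(edges)                  # three equal-width intervals over the observed range
print(binned.value_counts())  # how many values landed in each labelled bin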
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/767/129767337.ipynb
diabetes-prediction-dataset
iammustafatz
[{"Id": 129767337, "ScriptId": 38582119, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10882816, "CreationDate": "05/16/2023 10:25:57", "VersionNumber": 1.0, "Title": "Diabetes Predict Pipeline", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 157.0, "LinesInsertedFromPrevious": 157.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 6}]
[{"Id": 186127216, "KernelVersionId": 129767337, "SourceDatasetVersionId": 5344155}]
[{"Id": 5344155, "DatasetId": 3102947, "DatasourceVersionId": 5417553, "CreatorUserId": 11427441, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "04/08/2023 06:11:45", "VersionNumber": 1.0, "Title": "Diabetes prediction dataset", "Slug": "diabetes-prediction-dataset", "Subtitle": "A Comprehensive Dataset for Predicting Diabetes with Medical & Demographic Data", "Description": "The **Diabetes prediction dataset** is a collection of medical and demographic data from patients, along with their diabetes status (positive or negative). The data includes features such as age, gender, body mass index (BMI), hypertension, heart disease, smoking history, HbA1c level, and blood glucose level. This dataset can be used to build machine learning models to predict diabetes in patients based on their medical history and demographic information. This can be useful for healthcare professionals in identifying patients who may be at risk of developing diabetes and in developing personalized treatment plans. Additionally, the dataset can be used by researchers to explore the relationships between various medical and demographic factors and the likelihood of developing diabetes.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3102947, "CreatorUserId": 11427441, "OwnerUserId": 11427441.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5344155.0, "CurrentDatasourceVersionId": 5417553.0, "ForumId": 3166206, "Type": 2, "CreationDate": "04/08/2023 06:11:45", "LastActivityDate": "04/08/2023", "TotalViews": 127619, "TotalDownloads": 24886, "TotalVotes": 309, "TotalKernels": 120}]
[{"Id": 11427441, "UserName": "iammustafatz", "DisplayName": "Mohammed Mustafa", "RegisterDate": "08/29/2022", "PerformanceTier": 0}]
import pandas as pd import numpy as np import matplotlib.pyplot as plt df = pd.read_csv( "/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv" ) df.head() df.drop(columns=["gender", "age"], inplace=True) df.head() df["bmi"] = df.bmi.transform(lambda x: ((x - x.min()) / (x.max() - x.min()))) df.head() df["HbA1c_level"] = pd.cut( df["HbA1c_level"], bins=3, labels=["Optimal", "Elevated", "High"] ) df.head() df["blood_glucose_level"] = pd.cut( df["blood_glucose_level"], bins=3, labels=["Normal", "Prediabetes", "Diabetes"] ) df.head() X = df.drop(columns="diabetes") y = df.diabetes X = pd.get_dummies(X, columns=["smoking_history", "HbA1c_level", "blood_glucose_level"]) X.head() from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score rfc = RandomForestClassifier(n_estimators=10) scores = cross_val_score(rfc, X, y, cv=5) scores.mean() # # Splitting Data from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler X = df.drop(columns="diabetes") y = df.diabetes X = pd.get_dummies(X, columns=["smoking_history", "HbA1c_level", "blood_glucose_level"]) X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=0.2, random_state=48, stratify=y ) X_train.shape, X_test.shape, y_train.shape, y_test.shape from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score rfc = RandomForestClassifier(n_estimators=10) rfc.fit(X_train, y_train) rfc.score(X_test, y_test), rfc.score(X_train, y_train) # # Use MinMaxScaler for test Score! scaler = MinMaxScaler() scaler.fit(X_train) X_train_scaled = scaler.transform(X_train) X_test_scaled = scaler.transform(X_test) rfc.fit(X_train_scaled, y_train) rfc.score(X_test_scaled, y_test), rfc.score(X_train_scaled, y_train) # # Improvement data parameters n_estimators est = range(1, 51, 2) test_score = [] train_score = [] for e in est: rfc = RandomForestClassifier(n_estimators=e) rfc.fit(X_train, y_train) test_score.append(rfc.score(X_test, y_test)) train_score.append(rfc.score(X_train, y_train)) print(f"Max Score : {np.max(test_score)}") print(f"n_estimators : {est[np.argmax(test_score)]}") # # With Scaling MinMaxScaler # est = range(1, 51, 2) test_score = [] train_score = [] for e in est: rfc = RandomForestClassifier(n_estimators=e) rfc.fit(X_train_scaled, y_train) test_score.append(rfc.score(X_test_scaled, y_test)) train_score.append(rfc.score(X_train_scaled, y_train)) print(f"Max Score : {np.max(test_score)}") print(f"n_estimators : {est[np.argmax(test_score)]}") # # Pipeline from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import MinMaxScaler, OneHotEncoder from sklearn.compose import ColumnTransformer df.head() X = df.drop(columns=["diabetes"]) y = df.diabetes X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, stratify=y, random_state=42 ) X_train.shape, X_test.shape, y_train.shape, y_test.shape X_train.head() numerical_pipeline = Pipeline( [("imputer", SimpleImputer(strategy="median")), ("scale", MinMaxScaler())] ) categorical_pipeline = Pipeline( [("imputer", SimpleImputer(strategy="most_frequent")), ("onehot", OneHotEncoder())] ) X_train.head() preprocessor = ColumnTransformer( [ ("numeric", numerical_pipeline, ["hypertension", "heart_disease", "bmi"]), ( "categoric", categorical_pipeline, ["smoking_history", "HbA1c_level", "blood_glucose_level"], ), ] ) pipeline = Pipeline([("prep", preprocessor), ("algo", 
RandomForestClassifier())]) pipeline.fit(X_train, y_train) pipeline.score(X_test, y_test), pipeline.score(X_train, y_train) # # GridSearch CV from sklearn.model_selection import GridSearchCV parameters = { "algo__n_estimators": range(1, 71, 2), "algo__criterion": ["gini", "entropy", "log_loss"], } model = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1) model.fit(X_train, y_train) pd.DataFrame(model.cv_results_).sort_values("rank_test_score").head() model.best_params_ model.score(X_train, y_train), model.score(X_test, y_test)
false
1
1,496
6
1,687
1,496
129710641
# ## Sophia Thompson - My Magic Project - Single Cell Segmentation # ### Importing Packages # 1. `numpy` (`np`) # 2. `matplotlib.pyplot` (`plt`) # 3. `cv2` import numpy as np import matplotlib.pyplot as plt import cv2 # This step imported the packages of the different types of analytical libraries # ### Exploring Data Types # #### In this cell we define the following variables and print their value and data type. # 1. variable "a" with a value as 15 # 2. variable "b" with a value as 7.654 # 3. variable "c" with a value as 'Image 1' # 4. variable "d" with a value as a random numpy array between 0 and 255 with a shape of 128x128 and datatype 'uint8' a = 15 b = 7.654 c = "Image 1" d = np.random.randint(0, 256, (128, 128), dtype=np.uint8) # np.random.randint selects a random number between 0 and 255 - # 128, 128 is the dimensions of the array the numbers are going into d.max() # Here I defined my variables that I would be using below: # the variables are integers, strings, and floats # #### We converted the datatype of variable "d" to 'float' and scaled its values to be between 0 and 1. Then we printed the minimum, maximum, variance, shape, and datatype of the converted version of variable "d". d = d.astype(float) d = d / 255 print("The minimum is ", d.min()) print("The maximum is", d.max()) print("The variance is", d.std() ** 2) print("The shape is", d.shape) print("The datatype is", d.dtype) # Here I divided my variable d by 255, so that my range would be between 0 and 1 # I then asked Python to print the maximum, minimum, and more for my array # min, max and std are functions that need to be called() # shape and dtype are properties of the array and therefore do not need to be called # ### Analyzing Images with Histograms # #### Using only Python commands, we wrote a function named `get_histogram(array)` that takes any numpy array with a shape of `WxH` and returns a tuple of two items (i.e., `tuple(edge, histogram)`). Both items in the returned tuple must be numpy arrays with a shape of `(100,)` and their datatypes must be `float` and `int`, respectively. In the `edge` item, you need to set the value for the center of each histogram bin for 100 bins (For example, if the input array values are between 0 and 100, the edges must be `[0.5, 1.5, 2.5, ..., 99.5]`, representing values between `[0, 1), [1, 2), [2, 3), ..., [99, 100]`). For the `histogram` item, the array must contain the total number of cells in the input array that lie in between the mentioned values accordingly.
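# A compact reference version (my addition; it uses np.histogram rather than only plain Python, so treat it as a cross-check rather than the required solution) that satisfies the shape/dtype contract described above: 100 integer counts plus 100 float bin centers, with the counts summing to W*H.
import numpy as np


def get_histogram(array):
    counts, bin_edges = np.histogram(array, bins=100)  # counts: shape (100,), integer dtype
    centers = (bin_edges[:-1] + bin_edges[1:]) / 2     # centers: shape (100,), float dtype
    return centers, counts


arr = np.random.randint(0, 256, (128, 128), dtype=np.uint8)
edge, histogram = get_histogram(arr)
print(edge.shape, histogram.shape, histogram.sum() == arr.size)  # (100,) (100,) True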
# this piece of code can take any array and return a histogram with buckets # this counts the amount of pixels in the buckets, and then the average of the bucket edges def my_histogram(d, num): # d = a dummy variable acting as the image # edges are buckets # We placed them between the maximum and the minimum of the image # there are num buckets see code below edges = np.linspace(d.min(), d.max(), num=num) count_array = np.zeros(num - 1) # count array is an empty variable to be filled with counts for each bucket middle_array = np.zeros(num - 1) # middle array is the middle of your bucket - the average of the edges for i in range(num - 1): # We are looping through the buckets # bottom limit and top limit are the two edges of the bucket bottom_limit = edges[i] top_limit = edges[i + 1] count = ((d <= top_limit) & (d > bottom_limit)).sum() # count is counting the amount of pixels of d that falls within the bucket # the height of the bar in the histogram is the count middle = ((bottom_limit) + (top_limit)) / 2 # middle is the average of the the bucket edges - the limits middle_array[i] = middle count_array[i] = count # We saved the count and the middle, and then returned it at the bottom of the loop return count_array, middle_array count_array, middle_array = my_histogram(d, 100) print(count_array) # for the definded variable "d" the counts look pretty unifrom across the bins # because "d" is filled with random numbers # ###Remember The sum of `histogram` must be exactly equal to `W*H`. # #### We used the `plot` function of the `plt` package, to write a function named `plot_histogram(array)` that takes any numpy array with a shape of `WxH`, and displays the histogram of the input array. We used our previously defined function. We also made sure to get the title, x and y axes' labels, and legend for your plot. We added an argument to set the title and color, so that we can reuse the function in the future def plot_histogram(array, title="Sophia_HISTOGRAM", color="blue"): # we call the my_histogram function to get the counts and the edges # we set the number of bins to be 100 for all histograms - this can be changed # we choose to use the plot function rather than the bar function # this lets us better see the shape of individual peaks and overlay multiple histograms histogram, edge = my_histogram(array, 100) plt.figure(figsize=(12, 8)) plt.title(title) plt.plot(edge, histogram, color=color, label="Array's histogram") plt.xlabel("Edges") plt.ylabel("Repetitions") plot_histogram(d, "Sophia_HISTOGRAM", "blue") plot_histogram(d, "Summer Time", "pink") # we titled and labeld the histogram # we can modify the title and the color because those are input parameters to the function # the histogram looks like random noise because 'd' is generated by random numbers # #### We downloaded the images from the following address using the `wget` bash command and made sure it is downloaded correctly using the `ls` command. 
# wget and ls are linux command line functions # wget: wget will download information from the website given # ls: ls will print the contents of a directory/folder # equivilent of finder on a mac (a list of whats in your documents) # Image address: https://raw.githubusercontent.com/soroush361/AoE_BME/main/cell_sample.png # True mask address: https://raw.githubusercontent.com/soroush361/AoE_BME/main/cell_sample_mask.png # this downloads my images into my notebook # ### Image visualization and manipulation # #### We read the downloaded image (cell_sample.png) as a color image using OpenCV's functions. Then we converted the image from BGR to RGB format, scaled all pixels' values to be between 0 and 1, and printed the shape of the image, datatype, minimum and maximum value per channel (RGB). # *Remember: Make sure the datatype is `float32` and not the `float64`. # this piece of code reads the image, and makes the image from BGR to RGB # then it makes all of the pixel's values between 0 and 1 by dividing it by 255, # it then prints the max, min, varience, shape, datatype img = cv2.imread("/kaggle/working/cell_sample.png") img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = img.astype(np.float32) img = img / 255 print("The minimum is ", img.min()) print("The maxiumum is", img.max()) print("The variance is", img.std() ** 2) print("The shape is", img.shape) print("The datatype is", img.dtype) # #### We displayed the image using the `matplotlib` package, and then ploted the histogram of the image for each channel (RGB) using my `plot_histogram(array)` function. plot_histogram(img[:, :, 0], "red_HISTOGRAM", color="red") plot_histogram(img[:, :, 1], "green_HISTOGRAM", color="green") plot_histogram(img[:, :, 2], "blue_HISTOGRAM", color="blue") # this code plots my red, green and blue histograms, and labels them based on their color # #### We defined a new function as `plot_RGB(array)` in which the function will display the RGB image and the RGB histogram side-by-side. # this code places the original image side by side with the histogram that overlays all three colors histograms def plot_RGB(array, title="Original Image"): plt.figure(figsize=(12, 6)) # array is the array from the image # we make a subplot to display the image using plt.imshow # we do plt.axis('off') so that matplotlib does not create and x and y axis for this image plt.subplot(1, 2, 1) plt.title(title) plt.imshow(array) plt.axis("off") plt.subplot(1, 2, 2) plt.title("Histogram") for i in range(3): histogram, edge = my_histogram(array[:, :, i], 100) plt.plot( edge, histogram, label=f"Channel {Channel_names[i]}", color=Channel_names[i].lower(), ) plt.xlabel("Intensity") plt.ylabel("Abundance") plt.legend() # we made a second subplot so they would be side by side # we use for i in range(3) to repeat the plotting function for all three color channels plt.tight_layout Channel_names = ["Red", "Green", "Blue"] plot_RGB(img) # ### Pre-Processing # #### We want to blur the image with a moving average filter of size $5\times5$. Then, using the `filter2d` function from the OpenCV package, we filtered the image. Also, we displayed the filtered image using the `plot_RGB(array)` function. 
# (1) We apply a 5x5 kernel for slight blurring # (2) We apply a 50x50 kernel to compare output results for strong blurring (how does a filer affect the image) # this piece of code places a blurring filter on my image, in order to smooth the graph kernel = np.ones((5, 5)) / (5 * 5) print(kernel.shape) filtered_img = cv2.filter2D(img, -1, kernel) plot_RGB(filtered_img, title="filtered_img") # this is an exaple of what can happen if you blur it too much kernel = np.ones((50, 50)) / (50 * 50) print(kernel.shape) filtered_img2 = cv2.filter2D(img, -1, kernel) plot_RGB(filtered_img2, title="filtered_img2") # ### Image segmentation # #### We reloaded the image from the file (cell_sample.png) using OpenCV functions, but did not rescale it (Remember: make sure the datatype remains as `uint8`). Then, we converted the BGR image to grayscale and created a binary mask using the OpenCV package's `threshold` function. The threshold value is $127$. # *Remember: To make the binary mask, you should consider the following formula: # $$ # \forall x, y \quad mask(x,y) = \begin{cases} # 1\quad\text{if } gray(x,y) \ge 127 \\ # 0\quad\text{if } gray(x,y) < 127 \\ # \end{cases} # $$ # x,y is the location/index of a pixel, we repeat this for all pixels # $\forall = $ this symbol means for all pixels, x, y # #### Then, we loaded the ground-truth image (cell_sample_mask.png) and displayed all grayscale, thresholded, and the ground truth images side-by-side. img = cv2.imread("/kaggle/working/cell_sample.png") # load image img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # change color channels gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # change to gray scale print(gray_img.max()) # prints maxiumum value of gray image ret, mask_img = cv2.threshold( gray_img, 127, 255, cv2.THRESH_BINARY ) # apply thresholding to make binary print(mask_img.sum()) # counts all the white pixels in mask Ground_Truth = cv2.imread("/kaggle/working/cell_sample_mask.png") # load ground truth # this piece of code displays all four versions of the image side by side plt.figure(figsize=(15, 8)) plt.subplot(1, 4, 1) plt.title("Original Image") plt.imshow(img) plt.axis("off") plt.subplot(1, 4, 2) plt.title("Gray Image") plt.imshow(gray_img, cmap="gray") plt.axis("off") plt.subplot(1, 4, 3) plt.title("Mask Image") plt.imshow(mask_img, cmap="gray") plt.axis("off") plt.subplot(1, 4, 4) plt.title("Ground Truth") plt.imshow(Ground_Truth, cmap="gray") plt.axis("off") plt.tight_layout() plt.show() plt.close() # #### We used the OpenCV package for the active contour algorithm for image segmentation: https://learnopencv.com/contour-detection-using-opencv-python-c/. # #### We applyed the algorithm on the binary mask we created by thresholding the grayscale image from the previous cell. We drew the contours on a copy of the original image, and displayed the result. # *Remember: The `matplotlib` cannot display BGR images correctly. After drawing the contours, you need to convert the copy image to RGB to be displayed correctly. 
# We read this document https://learnopencv.com/contour-detection-using-opencv-python-c/ # this code draws the contours on the original image based the thresholding I did to the image contors, hierarchy = cv2.findContours( image=mask_img, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE ) print("The number of cells found is", len(contors)) copy_img = img.copy() cv2.drawContours( image=copy_img, contours=contors, contourIdx=-1, color=(255, 0, 0), thickness=1, lineType=cv2.LINE_AA, ) plt.figure(figsize=(12, 8)) plt.subplot(1, 2, 1) plt.title("Ground Truth") plt.imshow(Ground_Truth, cmap="gray") plt.axis("off") plt.subplot(1, 2, 2) plt.title("Sophias Contors") plt.imshow(copy_img) plt.axis("off") plt.show()
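# A hedged follow-up sketch (my addition, reusing mask_img and img from the cells above; the 50-pixel area threshold is an arbitrary illustration value): cv2.contourArea can drop tiny speckle contours before counting, which usually gives a more honest cell count than len(contors) on the raw threshold output.
import cv2

contours, _ = cv2.findContours(mask_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
MIN_AREA = 50  # pixels; tune for the image at hand
large = [c for c in contours if cv2.contourArea(c) >= MIN_AREA]
print("contours found:", len(contours))
print("contours kept after the area filter:", len(large))

filtered_copy = img.copy()
cv2.drawContours(filtered_copy, large, -1, (255, 0, 0), 1)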
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/710/129710641.ipynb
null
null
[{"Id": 129710641, "ScriptId": 37050488, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13378209, "CreationDate": "05/16/2023 00:21:25", "VersionNumber": 10.0, "Title": "Sophia Thompson - MAGIC Project", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 292.0, "LinesInsertedFromPrevious": 113.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 179.0, "LinesInsertedFromFork": 268.0, "LinesDeletedFromFork": 106.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 24.0, "TotalVotes": 0}]
null
null
null
null
# ## Sophia Thompson - My Magic Project - Single Cell Segmentation # ### Importing Packages # 1. `numpy` (`np`) # 2. `matplotlib.pyplot` (`plt`) # 3. `cv2` import numpy as np import matplotlib.pyplot as plt import cv2 # This step imported the packages of the different types of analytical libraries # ### Exploring Data Types # #### In this cell we define the following variables and print their value and data type. # 1. variable "a" with a value as 15 # 2. variable "b" with a value as 7.654 # 3. variable "c" with a value as 'Image 1' # 4. variable "d" with a value as a random numpy array between 0 to 255 with a shape of 128x128 and datatype 'uint8' a = 15 b = 7.654 c = "Image 1" d = np.random.randint(0, 256, (128, 128)) # np.random.randint selects a random number between 0 and 255 - # 128, 128 is the dimentions of the array the numbers are going into d.max() # Here I defined my variables that I would be using below: # the variables are integers, strings, and floats # #### We converted the datatype of variable "d" to 'float' and scaled its values to be between 0 to Then we printed the minimum, maximum, variance, shape, and datatype of the converted version of variable "d". d = d.astype(np.float) d = d / 250 print("The minimum is ", d.min()) print("The maxiumum is", d.max()) print("The variance is", d.std() ** 2) print("The shape is", d.shape) print("The datatype is", d.dtype) # Here I divided my variable d by 255, so that my range would be between 0 and 1 # I then asked python to print the maxiumum, minimum, and more for my array # min, max and std are functions that need to be called() # shape and dtype are properties of the array and therefore do not need to be called # ### Analyzing Images with Histograms # #### Using only Python commands, we wrote a function named `get_histogram(array)` that takes any numpy arrays with a shape of `WxH` and returns a tuple of two items (i.e., `tuple(edge, histogram)`). Both items in the returned tuple must be numpy arrays with a shape of `(100,)` and their datatypes must be`float` and `int`, respectively. In the `edge` item, you need to set the value for the center of each histogram bin for 100 bins (For example, if the input array values are between 0 and 100, the edges must be `[0.5, 1.5, 2.5, ..., 99.5]`, representing values between `[0, 1), [1, 2), [2, 3), ..., [99 100]`). For the `histogram` item, the array must contain the total number of cells in the input array that lies in between the mentioned values accordingly. 
# this piece of code can take any array and return a histogram with buckets # this counts the amount of pixels in the buckets, and then the average of the bucket edges def my_histogram(d, num): # d = a dummy variable acting as the image # edges are buckets # We placed them between the maximum and the minimum of the image # there are num buckets see code below edges = np.linspace(d.min(), d.max(), num=num) count_array = np.zeros(num - 1) # count array is an empty variable to be filled with counts for each bucket middle_array = np.zeros(num - 1) # middle array is the middle of your bucket - the average of the edges for i in range(num - 1): # We are looping through the buckets # bottom limit and top limit are the two edges of the bucket bottom_limit = edges[i] top_limit = edges[i + 1] count = ((d <= top_limit) & (d > bottom_limit)).sum() # count is counting the amount of pixels of d that falls within the bucket # the height of the bar in the histogram is the count middle = ((bottom_limit) + (top_limit)) / 2 # middle is the average of the the bucket edges - the limits middle_array[i] = middle count_array[i] = count # We saved the count and the middle, and then returned it at the bottom of the loop return count_array, middle_array count_array, middle_array = my_histogram(d, 100) print(count_array) # for the definded variable "d" the counts look pretty unifrom across the bins # because "d" is filled with random numbers # ###Remember The sum of `histogram` must be exactly equal to `W*H`. # #### We used the `plot` function of the `plt` package, to write a function named `plot_histogram(array)` that takes any numpy array with a shape of `WxH`, and displays the histogram of the input array. We used our previously defined function. We also made sure to get the title, x and y axes' labels, and legend for your plot. We added an argument to set the title and color, so that we can reuse the function in the future def plot_histogram(array, title="Sophia_HISTOGRAM", color="blue"): # we call the my_histogram function to get the counts and the edges # we set the number of bins to be 100 for all histograms - this can be changed # we choose to use the plot function rather than the bar function # this lets us better see the shape of individual peaks and overlay multiple histograms histogram, edge = my_histogram(array, 100) plt.figure(figsize=(12, 8)) plt.title(title) plt.plot(edge, histogram, color=color, label="Array's histogram") plt.xlabel("Edges") plt.ylabel("Repetitions") plot_histogram(d, "Sophia_HISTOGRAM", "blue") plot_histogram(d, "Summer Time", "pink") # we titled and labeld the histogram # we can modify the title and the color because those are input parameters to the function # the histogram looks like random noise because 'd' is generated by random numbers # #### We downloaded the images from the following address using the `wget` bash command and made sure it is downloaded correctly using the `ls` command. 
# wget and ls are linux command line functions # wget: wget will download information from the website given # ls: ls will print the contents of a directory/folder # equivilent of finder on a mac (a list of whats in your documents) # Image address: https://raw.githubusercontent.com/soroush361/AoE_BME/main/cell_sample.png # True mask address: https://raw.githubusercontent.com/soroush361/AoE_BME/main/cell_sample_mask.png # this downloads my images into my notebook # ### Image visualization and manipulation # #### We read the downloaded image (cell_sample.png) as a color image using OpenCV's functions. Then we converted the image from BGR to RGB format, scaled all pixels' values to be between 0 and 1, and printed the shape of the image, datatype, minimum and maximum value per channel (RGB). # *Remember: Make sure the datatype is `float32` and not the `float64`. # this piece of code reads the image, and makes the image from BGR to RGB # then it makes all of the pixel's values between 0 and 1 by dividing it by 255, # it then prints the max, min, varience, shape, datatype img = cv2.imread("/kaggle/working/cell_sample.png") img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = img.astype(np.float32) img = img / 255 print("The minimum is ", img.min()) print("The maxiumum is", img.max()) print("The variance is", img.std() ** 2) print("The shape is", img.shape) print("The datatype is", img.dtype) # #### We displayed the image using the `matplotlib` package, and then ploted the histogram of the image for each channel (RGB) using my `plot_histogram(array)` function. plot_histogram(img[:, :, 0], "red_HISTOGRAM", color="red") plot_histogram(img[:, :, 1], "green_HISTOGRAM", color="green") plot_histogram(img[:, :, 2], "blue_HISTOGRAM", color="blue") # this code plots my red, green and blue histograms, and labels them based on their color # #### We defined a new function as `plot_RGB(array)` in which the function will display the RGB image and the RGB histogram side-by-side. # this code places the original image side by side with the histogram that overlays all three colors histograms def plot_RGB(array, title="Original Image"): plt.figure(figsize=(12, 6)) # array is the array from the image # we make a subplot to display the image using plt.imshow # we do plt.axis('off') so that matplotlib does not create and x and y axis for this image plt.subplot(1, 2, 1) plt.title(title) plt.imshow(array) plt.axis("off") plt.subplot(1, 2, 2) plt.title("Histogram") for i in range(3): histogram, edge = my_histogram(array[:, :, i], 100) plt.plot( edge, histogram, label=f"Channel {Channel_names[i]}", color=Channel_names[i].lower(), ) plt.xlabel("Intensity") plt.ylabel("Abundance") plt.legend() # we made a second subplot so they would be side by side # we use for i in range(3) to repeat the plotting function for all three color channels plt.tight_layout Channel_names = ["Red", "Green", "Blue"] plot_RGB(img) # ### Pre-Processing # #### We want to blur the image with a moving average filter of size $5\times5$. Then, using the `filter2d` function from the OpenCV package, we filtered the image. Also, we displayed the filtered image using the `plot_RGB(array)` function. 
# (1) We apply a 5x5 kernel for slight blurring # (2) We apply a 50x50 kernel to compare output results for strong blurring (how does a filer affect the image) # this piece of code places a blurring filter on my image, in order to smooth the graph kernel = np.ones((5, 5)) / (5 * 5) print(kernel.shape) filtered_img = cv2.filter2D(img, -1, kernel) plot_RGB(filtered_img, title="filtered_img") # this is an exaple of what can happen if you blur it too much kernel = np.ones((50, 50)) / (50 * 50) print(kernel.shape) filtered_img2 = cv2.filter2D(img, -1, kernel) plot_RGB(filtered_img2, title="filtered_img2") # ### Image segmentation # #### We reloaded the image from the file (cell_sample.png) using OpenCV functions, but did not rescale it (Remember: make sure the datatype remains as `uint8`). Then, we converted the BGR image to grayscale and created a binary mask using the OpenCV package's `threshold` function. The threshold value is $127$. # *Remember: To make the binary mask, you should consider the following formula: # $$ # \forall x, y \quad mask(x,y) = \begin{cases} # 1\quad\text{if } gray(x,y) \ge 127 \\ # 0\quad\text{if } gray(x,y) < 127 \\ # \end{cases} # $$ # x,y is the location/index of a pixel, we repeat this for all pixels # $\forall = $ this symbol means for all pixels, x, y # #### Then, we loaded the ground-truth image (cell_sample_mask.png) and displayed all grayscale, thresholded, and the ground truth images side-by-side. img = cv2.imread("/kaggle/working/cell_sample.png") # load image img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # change color channels gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # change to gray scale print(gray_img.max()) # prints maxiumum value of gray image ret, mask_img = cv2.threshold( gray_img, 127, 255, cv2.THRESH_BINARY ) # apply thresholding to make binary print(mask_img.sum()) # counts all the white pixels in mask Ground_Truth = cv2.imread("/kaggle/working/cell_sample_mask.png") # load ground truth # this piece of code displays all four versions of the image side by side plt.figure(figsize=(15, 8)) plt.subplot(1, 4, 1) plt.title("Original Image") plt.imshow(img) plt.axis("off") plt.subplot(1, 4, 2) plt.title("Gray Image") plt.imshow(gray_img, cmap="gray") plt.axis("off") plt.subplot(1, 4, 3) plt.title("Mask Image") plt.imshow(mask_img, cmap="gray") plt.axis("off") plt.subplot(1, 4, 4) plt.title("Ground Truth") plt.imshow(Ground_Truth, cmap="gray") plt.axis("off") plt.tight_layout() plt.show() plt.close() # #### We used the OpenCV package for the active contour algorithm for image segmentation: https://learnopencv.com/contour-detection-using-opencv-python-c/. # #### We applyed the algorithm on the binary mask we created by thresholding the grayscale image from the previous cell. We drew the contours on a copy of the original image, and displayed the result. # *Remember: The `matplotlib` cannot display BGR images correctly. After drawing the contours, you need to convert the copy image to RGB to be displayed correctly. 
# We read this document https://learnopencv.com/contour-detection-using-opencv-python-c/ # this code draws the contours on the original image based the thresholding I did to the image contors, hierarchy = cv2.findContours( image=mask_img, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE ) print("The number of cells found is", len(contors)) copy_img = img.copy() cv2.drawContours( image=copy_img, contours=contors, contourIdx=-1, color=(255, 0, 0), thickness=1, lineType=cv2.LINE_AA, ) plt.figure(figsize=(12, 8)) plt.subplot(1, 2, 1) plt.title("Ground Truth") plt.imshow(Ground_Truth, cmap="gray") plt.axis("off") plt.subplot(1, 2, 2) plt.title("Sophias Contors") plt.imshow(copy_img) plt.axis("off") plt.show()
false
0
3,787
0
3,787
3,787
129710884
<jupyter_start><jupyter_text>MNIST Dataset ### Context MNIST is a subset of a larger set available from NIST (it's copied from http://yann.lecun.com/exdb/mnist/) ### Content The MNIST database of handwritten digits has a training set of 60,000 examples, and a test set of 10,000 examples. . Four files are available: - train-images-idx3-ubyte.gz: training set images (9912422 bytes) - train-labels-idx1-ubyte.gz: training set labels (28881 bytes) - t10k-images-idx3-ubyte.gz: test set images (1648877 bytes) - t10k-labels-idx1-ubyte.gz: test set labels (4542 bytes) ### How to read See [sample MNIST reader][1] Kaggle dataset identifier: mnist-dataset <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # This is a sample Notebook to demonstrate how to read "MNIST Dataset" # import numpy as np # linear algebra import struct from array import array from os.path import join # # MNIST Data Loader Class # class MnistDataloader(object): def __init__( self, training_images_filepath, training_labels_filepath, test_images_filepath, test_labels_filepath, ): self.training_images_filepath = training_images_filepath self.training_labels_filepath = training_labels_filepath self.test_images_filepath = test_images_filepath self.test_labels_filepath = test_labels_filepath def read_images_labels(self, images_filepath, labels_filepath): labels = [] with open(labels_filepath, "rb") as file: magic, size = struct.unpack(">II", file.read(8)) if magic != 2049: raise ValueError( "Magic number mismatch, expected 2049, got {}".format(magic) ) labels = array("B", file.read()) with open(images_filepath, "rb") as file: magic, size, rows, cols = struct.unpack(">IIII", file.read(16)) if magic != 2051: raise ValueError( "Magic number mismatch, expected 2051, got {}".format(magic) ) image_data = array("B", file.read()) images = [] for i in range(size): images.append([0] * rows * cols) for i in range(size): img = np.array(image_data[i * rows * cols : (i + 1) * rows * cols]) img = img.reshape(28, 28) images[i][:] = img return images, labels def load_data(self): x_train, y_train = self.read_images_labels( self.training_images_filepath, self.training_labels_filepath ) x_test, y_test = self.read_images_labels( self.test_images_filepath, self.test_labels_filepath ) return (x_train, y_train), (x_test, y_test) # !wget https://github.com/fboldt/aulasann/raw/main/mnist.zip # !unzip mnist.zip # # Verify Reading Dataset via MnistDataloader class # import random import matplotlib.pyplot as plt train_path = "../input/dog-breed-identification/train" test_path = "../input/dog-breed-identification/test" # reading dataset labels train_labels = pd.read_csv("../input/dog-breed-identification/labels.csv") test_labels = pd.read_csv("../input/dog-breed-identification/sample_submission.csv") train_labels["id"] = train_labels["id"].apply(lambda x: x + ".jpg") test_labels["id"] = test_labels["id"].apply(lambda x: x + ".jpg") x = train_labels.breed.unique() include = [ "beagle", "chihuahua", "doberman", "french_bulldog", 
"golden_retriever", "malamute", "pug", "saint_bernard", "scottish_deerhound", "tibetan_mastiff", ] for i in x: if i not in include: train_labels = train_labels.drop(train_labels[train_labels["breed"] == i].index) print(train_labels.shape) train_labels.head() train_datagen = ImageDataGenerator(rescale=1) train_set = train_datagen.flow_from_dataframe( dataframe=train_labels, directory=train_path, x_col="id", y_col="breed", batch_size=16, subset="training", class_mode="categorical", target_size=(224, 224), seed=42, shuffle=True, ) validate_set = train_datagen.flow_from_dataframe( dataframe=train_labels, directory=train_path, x_col="id", y_col="breed", batch_size=16, subset="validation", class_mode="categorical", target_size=(224, 224), seed=42, shuffle=True, ) test_datagen = ImageDataGenerator(rescale=1) # defining test set test_set = test_datagen.flow_from_dataframe( dataframe=test_labels, directory=test_path, x_col="id", y_col=None, batch_size=16, class_mode=None, seed=42, shuffle=False, target_size=(224, 224), ) validate_set.batch_size from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin from sklearn.pipeline import Pipeline from tensorflow import keras from tensorflow.keras import layers from sklearn.metrics import accuracy_score import numpy as np class RedeNeural(BaseEstimator, ClassifierMixin): def __init__(self, epochs=5, batch_size=128): self.epochs = epochs self.batch_size = batch_size def fit(self, X, y): self.labels, ids = np.unique(y, return_inverse=True) yhot = keras.utils.to_categorical(ids) self.model = keras.Sequential( [ layers.Conv2D(96, (4, 4), strides=4, activation="relu"), layers.MaxPool2D((3, 3), strides=2), layers.Flatten(), layers.Dense(512, activation="relu"), layers.Dense(yhot.shape[1], activation="softmax"), ] ) self.model.compile( optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"] ) self.model.fit(X, yhot, epochs=self.epochs, batch_size=self.batch_size) return self def predict(self, X, y=None): probabilities = self.model.predict(X) ypred = self.labels[np.argmax(probabilities, axis=1)] return ypred class DividePor255(BaseEstimator, TransformerMixin): def fit(self, X, y): return self def transform(self, X, y=None): return np.array(X, dtype="float32") / 255 class MudaShape(BaseEstimator, TransformerMixin): def fit(self, X, y): return self def transform(self, X, y=None): return X.reshape((-1, 28, 28, 1)) modelo = Pipeline( [("scaler", DividePor255()), ("reshape", MudaShape()), ("ann", RedeNeural())] ) modelo.fit(train_set, test_labels) ypred = modelo.predict(test_set)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/710/129710884.ipynb
mnist-dataset
hojjatk
[{"Id": 129710884, "ScriptId": 38572066, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7179670, "CreationDate": "05/16/2023 00:25:44", "VersionNumber": 1.0, "Title": "dog-breed", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 182.0, "LinesInsertedFromPrevious": 84.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 98.0, "LinesInsertedFromFork": 84.0, "LinesDeletedFromFork": 78.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 98.0, "TotalVotes": 0}]
[{"Id": 186046058, "KernelVersionId": 129710884, "SourceDatasetVersionId": 242592}]
[{"Id": 242592, "DatasetId": 102285, "DatasourceVersionId": 254413, "CreatorUserId": 1840515, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "01/08/2019 13:01:57", "VersionNumber": 1.0, "Title": "MNIST Dataset", "Slug": "mnist-dataset", "Subtitle": "The MNIST database of handwritten digits (http://yann.lecun.com)", "Description": "### Context\n\nMNIST is a subset of a larger set available from NIST (it's copied from http://yann.lecun.com/exdb/mnist/)\n\n\n### Content\nThe MNIST database of handwritten digits has a training set of 60,000 examples, and a test set of 10,000 examples. .\nFour files are available:\n\n - train-images-idx3-ubyte.gz: training set images (9912422 bytes) \n - train-labels-idx1-ubyte.gz: training set labels (28881 bytes)\n - t10k-images-idx3-ubyte.gz: test set images (1648877 bytes) \n - t10k-labels-idx1-ubyte.gz: test set labels (4542 bytes)\n\n### How to read\nSee [sample MNIST reader][1]\n\n### Acknowledgements\n* Yann LeCun, Courant Institute, NYU\n* Corinna Cortes, Google Labs, New York\n* Christopher J.C. Burges, Microsoft Research, Redmond\n\n### Inspiration\nMany methods have been tested with this training set and test set (see http://yann.lecun.com/exdb/mnist/ for more details)\n\n\n [1]: https://www.kaggle.com/hojjatk/read-mnist-dataset", "VersionNotes": "Initial release", "TotalCompressedBytes": 11594722.0, "TotalUncompressedBytes": 11594722.0}]
[{"Id": 102285, "CreatorUserId": 1840515, "OwnerUserId": 1840515.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 242592.0, "CurrentDatasourceVersionId": 254413.0, "ForumId": 111993, "Type": 2, "CreationDate": "01/08/2019 13:01:57", "LastActivityDate": "01/08/2019", "TotalViews": 113247, "TotalDownloads": 16600, "TotalVotes": 111, "TotalKernels": 67}]
[{"Id": 1840515, "UserName": "hojjatk", "DisplayName": "Hojjat Khodabakhsh", "RegisterDate": "04/20/2018", "PerformanceTier": 0}]
false
0
1,845
0
2,078
1,845
129710876
# Import pandas for data manipulation and analysis import pandas as pd # Import numpy for numerical operations import numpy as np # Import machine learning models from sklearn.linear_model import Ridge, Lasso from sklearn.ensemble import RandomForestRegressor # Import evaluation metrics from sklearn.metrics import make_scorer, mean_squared_log_error, mean_squared_error # Import model selection tools from sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score # Import preprocessing tools from sklearn.preprocessing import LabelEncoder # Import visualization libraries import seaborn as sns import matplotlib.pyplot as plt import warnings # Import the warnings module to handle warnings warnings.filterwarnings("ignore") # Set the filterwarnings function to ignore warnings train_data = pd.read_csv("../input/the-price-of-art/train.csv") # Read the 'train.csv' file into a pandas DataFrame and assign it to the variable train_data test_data = pd.read_csv("../input/the-price-of-art/test.csv") # Read the 'test.csv' file into a pandas DataFrame and assign it to the variable test_data sample_data = pd.read_csv("../input/the-price-of-art/sample.csv") # Read the 'sample.csv' file into a pandas DataFrame and assign it to the variable sample_price # View the first few rows of the Training Dataset train_data.head() # Generate descriptive statistics of the Training Dataset train_data.describe() # View the concise summary of the Training Dataset train_data.info() # Calculate the correlation matrix corr = train_data.corr() # Create a heatmap visualization of the correlation matrix with a different color map sns.heatmap(corr, annot=True, cmap="viridis") # Set the title of the heatmap plt.title("Correlation Matrix") # Display the heatmap plt.show() # Create an instance of LabelEncoder le = LabelEncoder() # Encode the 'auction' column in the 'price_train' DataFrame temp = le.fit_transform(train_data["auction"]) # Encode the 'artist' column in the 'price_train' DataFrame temp1 = le.fit_transform(train_data["artist"]) # Encode the 'category' column in the 'price_train' DataFrame temp2 = le.fit_transform(train_data["category"]) # Encode the 'provenance' column in the 'price_train' DataFrame temp3 = le.fit_transform(train_data["provenance"]) # Encode the 'original_currency' column in the 'price_train' DataFrame temp4 = le.fit_transform(train_data["original_currency"]) # Encode the 'location' column in the 'price_train' DataFrame temp5 = le.fit_transform(train_data["location"]) # Encode the 'category' column in the 'price_test' DataFrame temp8 = le.fit_transform(test_data["category"]) # Encode the 'original_currency' column in the 'price_test' DataFrame temp9 = le.fit_transform(test_data["original_currency"]) # Encode the 'auction' column in the 'price_test' DataFrame temp10 = le.fit_transform(test_data["auction"]) # Encode the 'artist' column in the 'price_test' DataFrame temp11 = le.fit_transform(test_data["artist"]) # Encode the 'provenance' column in the 'price_test' DataFrame temp12 = le.fit_transform(test_data["provenance"]) # Encode the 'location' column in the 'price_test' DataFrame temp13 = le.fit_transform(test_data["location"]) # Remove specific columns from the 'train_data' DataFrame train_data.drop("artist", axis=1, inplace=True) train_data.drop("provenance", axis=1, inplace=True) train_data.drop("category", axis=1, inplace=True) train_data.drop("original_currency", axis=1, inplace=True) train_data.drop("location", axis=1, inplace=True) train_data.drop("auction", axis=1, 
inplace=True) # Remove specific columns from the 'test_data' DataFrame test_data.drop("original_currency", axis=1, inplace=True) test_data.drop("category", axis=1, inplace=True) test_data.drop("auction", axis=1, inplace=True) test_data.drop("artist", axis=1, inplace=True) test_data.drop("provenance", axis=1, inplace=True) test_data.drop("location", axis=1, inplace=True) # Assign the encoded values back to the 'train_data' DataFrame train_data["artist"] = temp1 train_data["category"] = temp2 train_data["provenance"] = temp3 train_data["original_currency"] = temp4 train_data["location"] = temp5 train_data["auction"] = temp # Assign the encoded values back to the 'test_data' DataFrame test_data["category"] = temp8 test_data["original_currency"] = temp9 test_data["auction"] = temp10 test_data["artist"] = temp11 test_data["provenance"] = temp12 test_data["location"] = temp13 # Convert 'height_cm' column to numeric train_data["height_cm"] = pd.to_numeric(train_data["height_cm"], errors="coerce") # Convert 'width_cm' column to numeric train_data["width_cm"] = pd.to_numeric(train_data["width_cm"], errors="coerce") # Calculate 'area' column by multiplying 'height_cm' and 'width_cm' train_data["area"] = train_data["height_cm"] * train_data["width_cm"] # Calculate the length of 'details' column and assign it to 'details' column train_data["details"] = train_data["details"].str.len() # Convert 'literature' column to 1 if not null, else 0 train_data["literature"] = np.where(train_data["literature"].isnull(), 0, 1) # Convert 'exhibited' column to 1 if not null, else 0 train_data["exhibited"] = np.where(train_data["exhibited"].isnull(), 0, 1) # Convert 'date' column to datetime train_data["date"] = pd.to_datetime(train_data["date"]) # Extract the year from 'date' column and assign it to 'year' column train_data["year"] = train_data["date"].dt.year # Drop the 'date', 'height_cm', and 'width_cm' columns from the 'train_data' DataFrame train_data.drop("date", axis=1, inplace=True) train_data.drop("height_cm", axis=1, inplace=True) train_data.drop("width_cm", axis=1, inplace=True) # Convert 'height_cm' column to numeric test_data["height_cm"] = pd.to_numeric(test_data["height_cm"], errors="coerce") # Convert 'width_cm' column to numeric test_data["width_cm"] = pd.to_numeric(test_data["width_cm"], errors="coerce") # Calculate 'area' column by multiplying 'height_cm' and 'width_cm' test_data["area"] = test_data["height_cm"] * test_data["width_cm"] # Calculate the length of 'details' column and assign it to 'details' column test_data["details"] = test_data["details"].str.len() # Convert 'literature' column to 1 if not null, else 0 test_data["literature"] = np.where(test_data["literature"].isnull(), 0, 1) # Convert 'exhibited' column to 1 if not null, else 0 test_data["exhibited"] = np.where(test_data["exhibited"].isnull(), 0, 1) # Convert 'date' column to datetime test_data["date"] = pd.to_datetime(test_data["date"]) # Extract the year from 'date' column and assign it to 'year' column test_data["year"] = test_data["date"].dt.year # Drop the 'date', 'height_cm', and 'width_cm' columns from the 'test_data' DataFrame test_data.drop("date", axis=1, inplace=True) test_data.drop("height_cm", axis=1, inplace=True) test_data.drop("width_cm", axis=1, inplace=True) # Define the features and target variables features = [ "literature", "exhibited", "estimate_low_usd", "estimate_high_usd", "year", "category", "location", ] X = train_data[features] y = train_data["price_realized_usd"] # Split the data into training and 
testing sets X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.4, random_state=356 ) # Create and train the Lasso regression model lasso = Lasso(alpha=10) lasso.fit(X_train, y_train) # Calculate the train and test scores train_score_ls = lasso.score(X_train, y_train) test_score_ls = lasso.score(X_test, y_test) # Print the train and test scores print("The train score for Lasso model is {}".format(train_score_ls)) print("The test score for Lasso model is {}".format(test_score_ls)) from sklearn.linear_model import LassoCV # Create and train the LassoCV model lasso_cv = LassoCV(alphas=[0.0001, 0.001, 0.01, 0.1, 1, 10], random_state=0).fit( X_train, y_train ) # Calculate the scores train_score_cv = lasso_cv.score(X_train, y_train) test_score_cv = lasso_cv.score(X_test, y_test) # Print the scores print("The train score for LassoCV model is {}".format(train_score_cv)) print("The test score for LassoCV model is {}".format(test_score_cv)) from sklearn.linear_model import Ridge # Create and train the Ridge regression model ridgeReg = Ridge(alpha=10) ridgeReg.fit(X_train, y_train) # Calculate the train and test scores train_score_ridge = ridgeReg.score(X_train, y_train) test_score_ridge = ridgeReg.score(X_test, y_test) # Print the scores print("The train score for Ridge model is {}".format(train_score_ridge)) print("The test score for Ridge model is {}".format(test_score_ridge)) from sklearn.ensemble import RandomForestRegressor # Create the Random Forest regression model with specified parameters rf = RandomForestRegressor( n_estimators=100, max_depth=10, min_samples_split=5, random_state=123 ) # Fit the model to the training data rf.fit(X_train, y_train) # The Random Forest model is now trained on the training data from sklearn.model_selection import GridSearchCV from sklearn.metrics import make_scorer, mean_squared_error # Define the hyperparameter grid to search over param_grid = {"alpha": [0.1, 1, 10, 100]} # Create model instances ridge = Ridge() lasso = Lasso() # Define the scoring metric as mean squared error scoring_metric = make_scorer(mean_squared_error, squared=False) # Perform a grid search over the hyperparameters using 10-fold cross-validation ridge_grid_search = GridSearchCV(ridge, param_grid, cv=10, scoring=scoring_metric) lasso_grid_search = GridSearchCV(lasso, param_grid, cv=10, scoring=scoring_metric) # Fit the grid search to the training data ridge_grid_search.fit(X_train, y_train) lasso_grid_search.fit(X_train, y_train) # Print the best hyperparameters and the corresponding mean squared error print("Ridge Regression:") print("Best hyperparameters: ", ridge_grid_search.best_params_) print("Best RMSLE: ", ridge_grid_search.best_score_) print("Lasso Regression:") print("Best hyperparameters: ", lasso_grid_search.best_params_) print("Best RMSLE: ", lasso_grid_search.best_score_) ridge_model = Ridge(alpha=ridge_grid_search.best_params_["alpha"]) ridge_model.fit(X_train, y_train) lasso_model = Lasso(alpha=lasso_grid_search.best_params_["alpha"]) lasso_model.fit(X_train, y_train) # Make predictions using the Ridge model ridge_pred = ridge_model.predict(X_test) # Make predictions using the Lasso model lasso_pred = lasso_model.predict(X_test) # Make predictions using the Random Forest model rf_pred = rf.predict(X_test) # Calculate the RMSLE for Ridge regression ridge_rmsle = np.mean((np.log1p(1 + ridge_pred) - np.log1p(1 + y_test)) ** 2) # Calculate the RMSLE for Lasso regression lasso_rmsle = np.mean((np.log1p(1 + lasso_pred) - np.log1p(1 + y_test)) ** 2) # 
Calculate the RMSLE for Random Forest regression rf_rmsle = np.mean((np.log1p(1 + rf_pred) - np.log1p(1 + y_test)) ** 2) # Print the RMSLE for all models print("Ridge regression RMSLE: ", ridge_rmsle) print("Lasso regression RMSLE: ", lasso_rmsle) print("Random Forest regression RMSLE: ", rf_rmsle) # Select features for the test data test_X = test_data[features] # Assign the 'object_id' column from test_data to test1 test1 = test_data["object_id"] # Use the trained random forest model to predict the target variable for the test data preds_test = rf.predict(test_X) # Create a DataFrame combining 'object_id' and predicted values output = pd.DataFrame({"object_id": test1, "price_realized_usd": preds_test}) # Display the first few rows of the DataFrame output.head() output.to_csv("submission.csv", index=False)
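# The RMSLE expressions above use np.log1p(1 + x), which evaluates log(2 + x) rather than
# log(1 + x), and they omit the final square root, so the printed numbers are neither RMSLE
# nor MSLE. A hedged sketch of the usual formula, with small stand-in arrays in place of
# y_test and the model predictions (hypothetical values, not results from this notebook):
import numpy as np
from sklearn.metrics import mean_squared_log_error

y_true = np.array([100.0, 2500.0, 40000.0])
y_pred = np.array([120.0, 2000.0, 38000.0])

# RMSLE = sqrt(mean((log(1 + y_pred) - log(1 + y_true))^2)); np.log1p(x) is log(1 + x).
rmsle_manual = np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2))

# Equivalent route via scikit-learn: mean_squared_log_error returns the squared quantity.
rmsle_sklearn = np.sqrt(mean_squared_log_error(y_true, y_pred))
print(rmsle_manual, rmsle_sklearn)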
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/710/129710876.ipynb
null
null
[{"Id": 129710876, "ScriptId": 38573349, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12339056, "CreationDate": "05/16/2023 00:25:38", "VersionNumber": 1.0, "Title": "HW_MeghaJoseph", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 348.0, "LinesInsertedFromPrevious": 348.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
3,603
0
3,603
3,603
129684642
import numpy as np # import pandas as pd # This line imports the pandas library, which is a popular data mainuplation and analysis library in python, it is commonly used for working with tabular data. from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import seaborn as sns # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # **Project Overview** # In this project, we will be analyzing a dataset of Airbnb listings in New York City to gain insights about the rental market. We will start by performing data cleaning and preprocessing, followed by exploratory data analysis to understand the characteristics of the listings. Then, we will build a machine learning model to predict the rental prices of the listings based on their features. Finally, we will interpret the model's results and draw conclusions about the rental market. # **Data Preparation** # The dataset we will be using is available on Kaggle. We will begin by importing the necessary libraries and loading the dataset into a pandas DataFrame: # This line reads the dataset file,and loads its contents into a DataFrame variable named'data' # the read_csv() function is a pandas function use to read CSV (comma-separated values)files. data = pd.read_csv("../input/airbnb-nyc-data/AnBNYC2019.csv") data.head(10) # this line will show the first 5 rows of the dataset. # **We can then perform some basic data exploration to understand the structure of the dataset:** print(data.head()) print(data.info()) print(data.describe()) # **Data Cleaning and Preprocessing** # We will now clean and preprocess the data to prepare it for analysis. First, we will remove any unnecessary columns: print(data.columns) # **Next, we will handle missing values:** avg_price_neighbourhood = data.groupby("neighbourhood_group")["price"].mean() # this line perform a groupby operation on the data DATAFRAME. # It groups the data based on the unique values in the 'neighbourhood_group' column. # The groupby() function returns a grouped DataFrame Object # The ['price'] part of the code specifies that we are interested in the price column of the DataFrame. # Finally, the 'mean' function is applied to calculate the mean (averafe) price for each group. # The result is stored in the 'avg_price_neighbourhood' variable print(avg_price_neighbourhood) # this line prints the calculated average prices for each neighbourhood group. # The 'print' function is used to display the output to the console. # by executing these lines of code, you group by the data by the variable you want to use, calculate the aeverage price for each group, and display the results. 
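# As a quick illustration of the groupby call described above, a tiny self-contained
# example with made-up prices (not the Airbnb data itself):
import pandas as pd

toy = pd.DataFrame({
    "neighbourhood_group": ["Manhattan", "Manhattan", "Brooklyn", "Queens"],
    "price": [200, 150, 90, 75],
})
# Rows are bucketed by the unique values of the key column; selecting ["price"] and
# calling .mean() averages prices within each bucket.
print(toy.groupby("neighbourhood_group")["price"].mean())
# Brooklyn      90.0
# Manhattan    175.0
# Queens        75.0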
# Fill missing values in the 'name' column with 'Unknown' data["name"].fillna("Unknown", inplace=True) # Fill missing values in the 'host_name' column with 'Unknown' data["host_name"].fillna("Unknown", inplace=True) # Fill missing values in the 'neighbourhood_group' column with 'Unknown' data["neighbourhood_group"].fillna("Unknown", inplace=True) # Fill missing values in the 'reviews_per_month' column with 0 data["reviews_per_month"].fillna(0, inplace=True) data = pd.get_dummies(data, columns=["neighbourhood_group", "room_type"]) scaler = StandardScaler() data[ [ "latitude", "longitude", "price", "minimum_nights", "number_of_reviews", "reviews_per_month", "calculated_host_listings_count", "availability_365", ] ] = scaler.fit_transform( data[ [ "latitude", "longitude", "price", "minimum_nights", "number_of_reviews", "reviews_per_month", "calculated_host_listings_count", "availability_365", ] ] ) # **Visualizing Price Distribution:** # To understand the distribution of rental prices, we can use a histogram plot. The code snippet below demonstrates how to create a histogram using Seaborn library (sns.histplot()) and visualize the distribution of prices: sns.histplot(data["price"], bins=50) plt.title("Price Distribution") plt.show() # This visualization helps stakeholders understand the spread of rental prices and identify the most common price range. It provides insights into the affordability and price range that customers can expect when searching for rentals. # Next, we will visualize the relationship between the rental price and other features: sns.pairplot( data[ [ "price", "latitude", "longitude", "minimum_nights", "number_of_reviews", "reviews_per_month", "calculated_host_listings_count", "availability_365", ] ] ) plt.title("Pairwise Relationships") plt.show() # This visualization shows the relationship between the rental price and several other features of the listings, such as their location (latitude and longitude), minimum number of nights required to stay, number of reviews received, and availability throughout the year. # Each scatter plot in the pair plot represents the relationship between price and one of the selected features. It helps identify any patterns or correlations between price and variables like location, minimum nights, reviews, host listings count, and availability. # # For example, we can see that listings with a higher price tend to be located in certain areas of the city, or have a higher minimum number of nights required to book. # This visualization can help stakeholders understand the factors that affect the rental price of a listing, and can be useful in making decisions about pricing and marketing strategies. It can also provide insights into what features guests value the most and are willing to pay more for, which can inform decisions about property improvements and upgrades. # **Analyzing Average Price by Neighbourhood Group:** # We can calculate the average price of rentals for each neighbourhood group and visualize it using a bar plot. The code snippet below demonstrates this: avg_price_neighbourhood = ( data.groupby("neighbourhood_group")["price"].mean().reset_index() ) plt.figure(figsize=(10, 6)) sns.barplot(x="neighbourhood_group", y="price", data=avg_price_neighbourhood) plt.xlabel("Neighbourhood Group") plt.ylabel("Average Price") plt.title("Average Price by Neighbourhood Group") plt.show() # This visualization helps stakeholders understand the price differences among different neighbourhood groups. 
It provides insights into which neighbourhood groups tend to have higher or lower average rental prices, which can be valuable for understanding the market dynamics and making informed decisions about property investments and rental strategies. # **Distribution of Room Types:** # We can visualize the distribution of different room types available in the dataset using a bar plot. The code snippet below demonstrates how to create this visualization: room_type_counts = data["room_type"].value_counts() plt.figure(figsize=(8, 6)) sns.barplot(x=room_type_counts.index, y=room_type_counts.values) plt.xlabel("Room Type") plt.ylabel("Count") plt.title("Distribution of Room Types") plt.show() # This visualization provides insights into the proportion of each room type (e.g., entire home/apartment, private room, shared room) in the dataset. It can help stakeholders understand the composition of listings and the preferences of guests when it comes to room types. # **Average Price by Room Type:** # We can calculate the average rental price for each room type and visualize it using a bar plot. The code snippet below demonstrates how to create this visualization: avg_price_room_type = data.groupby("room_type")["price"].mean().reset_index() plt.figure(figsize=(8, 6)) sns.barplot(x="room_type", y="price", data=avg_price_room_type) plt.xlabel("Room Type") plt.ylabel("Average Price") plt.title("Average Price by Room Type") plt.show() # This visualization helps stakeholders understand the price differences among different room types. It provides insights into which room types generally command higher or lower average rental prices, which can be useful for pricing strategies and targeting specific guest preferences. # **Availability throughout the Year:** # We can analyze the availability of listings throughout the year using a line plot. The code snippet below demonstrates how to create this visualization: availability_year = ( data.groupby("neighbourhood_group")["availability_365"].mean().reset_index() ) plt.figure(figsize=(10, 6)) sns.lineplot(x="neighbourhood_group", y="availability_365", data=availability_year) plt.xlabel("Neighbourhood Group") plt.ylabel("Average Availability (in days)") plt.title("Availability throughout the Year") plt.show() # This visualization provides insights into the average availability of listings in different neighbourhood groups over the course of a year. It helps stakeholders understand the patterns of availability and can be useful for planning promotions, managing occupancy rates, and optimizing revenue. # # Machine learning prediction process using the dataset. I will be using a linear regression model to predict the rental prices based on the available features. # # Data Preprocessing for the machine learning # Select relevant features for prediction selected_features = [ "latitude", "longitude", "minimum_nights", "number_of_reviews", "reviews_per_month", "calculated_host_listings_count", "availability_365", "price", ] data = data[selected_features] # Drop rows with missing values data.dropna(inplace=True) # We select the relevant features that we want to use for prediction. In this case, I chose 'latitude', 'longitude', 'minimum_nights', 'number_of_reviews', 'reviews_per_month', 'calculated_host_listings_count', 'availability_365', and the target variable 'price'. # We update the data DataFrame to include only the selected features using the indexing operator []. 
# To ensure the quality of our dataset again, we drop any rows with missing values using the dropna() function. # # Feature Scaling: # Separate the features and target variable X = data.drop("price", axis=1) y = data["price"] # Perform feature scaling scaler = StandardScaler() X_scaled = scaler.fit_transform(X) # We separate the features (X) from the target variable (y) by dropping the 'price' column from the data DataFrame and assigning it to X, while assigning the 'price' column to y. # Next, we create an instance of the StandardScaler class and store it in the scaler variable. # We perform feature scaling on the features (X) using the fit_transform() method of the StandardScaler object. This scales the features to have zero mean and unit variance, ensuring that all features are on a similar scale. # # Model Training and Evaluation: from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score from sklearn.metrics import accuracy_score # Split the dataset into training and test sets X_train, X_test, y_train, y_test = train_test_split( X_scaled, y, test_size=0.2, random_state=42 ) # Create and train the linear regression model model = LinearRegression() model.fit(X_train, y_train) # Make predictions on the test set y_pred = model.predict(X_test) # Evaluate the model mse = mean_squared_error(y_test, y_pred) rmse = mean_squared_error(y_test, y_pred, squared=False) r2 = r2_score(y_test, y_pred) # Print the evaluation metrics print("Mean Squared Error (MSE):", mse) print("Root Mean Squared Error (RMSE):", rmse) print("R-squared (R2) Score:", r2)
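# One caveat in the flow above: the first StandardScaler pass standardizes "price" together
# with the features, so the later price plots and the regression target are on a z-score
# scale, and the features end up scaled twice (the earlier get_dummies call also replaces
# the room_type and neighbourhood_group columns, so the later value_counts/groupby calls on
# those names would need a copy taken before encoding). A common alternative, sketched here
# on a small synthetic frame (hypothetical columns and values, not the notebook's data),
# keeps the target on its original scale and lets a Pipeline fit the scaler on the training
# fold only:
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
frame = pd.DataFrame({
    "minimum_nights": rng.integers(1, 30, 200),
    "number_of_reviews": rng.integers(0, 300, 200),
    "availability_365": rng.integers(0, 365, 200),
})
frame["price"] = 50 + 2.0 * frame["minimum_nights"] + rng.normal(0, 10, 200)

X = frame.drop("price", axis=1)
y = frame["price"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Scaling is fit inside the pipeline on the training fold only, so the held-out fold
# never leaks into the scaler, and the target stays in its original units.
model = Pipeline([("scaler", StandardScaler()), ("ols", LinearRegression())])
model.fit(X_train, y_train)
print("R^2 on held-out data:", model.score(X_test, y_test))
# This mirrors the manual StandardScaler + LinearRegression steps above, but packaged so the
# same object can be cross-validated or grid-searched without re-scaling by hand.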
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/684/129684642.ipynb
null
null
[{"Id": 129684642, "ScriptId": 38508052, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6455124, "CreationDate": "05/15/2023 18:17:42", "VersionNumber": 2.0, "Title": "AirBnB", "EvaluationDate": "05/15/2023", "IsChange": false, "TotalLines": 227.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 227.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,985
0
2,985
2,985
129684411
<jupyter_start><jupyter_text>Car Price Prediction Multiple Linear Regression ### Problem Statement A Chinese automobile company Geely Auto aspires to enter the US market by setting up their manufacturing unit there and producing cars locally to give competition to their US and European counterparts. They have contracted an automobile consulting company to understand the factors on which the pricing of cars depends. Specifically, they want to understand the factors affecting the pricing of cars in the American market, since those may be very different from the Chinese market. The company wants to know: Which variables are significant in predicting the price of a car How well those variables describe the price of a car Based on various market surveys, the consulting firm has gathered a large data set of different types of cars across the America market. ### Business Goal We are required to model the price of cars with the available independent variables. It will be used by the management to understand how exactly the prices vary with the independent variables. They can accordingly manipulate the design of the cars, the business strategy etc. to meet certain price levels. Further, the model will be a good way for management to understand the pricing dynamics of a new market. ### Please Note : The dataset provided is for learning purpose. Please don’t draw any inference with real world scenario. Kaggle dataset identifier: car-price-prediction <jupyter_code>import pandas as pd df = pd.read_csv('car-price-prediction/CarPrice_Assignment.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 205 entries, 0 to 204 Data columns (total 26 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 car_ID 205 non-null int64 1 symboling 205 non-null int64 2 CarName 205 non-null object 3 fueltype 205 non-null object 4 aspiration 205 non-null object 5 doornumber 205 non-null object 6 carbody 205 non-null object 7 drivewheel 205 non-null object 8 enginelocation 205 non-null object 9 wheelbase 205 non-null float64 10 carlength 205 non-null float64 11 carwidth 205 non-null float64 12 carheight 205 non-null float64 13 curbweight 205 non-null int64 14 enginetype 205 non-null object 15 cylindernumber 205 non-null object 16 enginesize 205 non-null int64 17 fuelsystem 205 non-null object 18 boreratio 205 non-null float64 19 stroke 205 non-null float64 20 compressionratio 205 non-null float64 21 horsepower 205 non-null int64 22 peakrpm 205 non-null int64 23 citympg 205 non-null int64 24 highwaympg 205 non-null int64 25 price 205 non-null float64 dtypes: float64(8), int64(8), object(10) memory usage: 41.8+ KB <jupyter_text>Examples: { "car_ID": 1, "symboling": 3, "CarName": "alfa-romero giulia", "fueltype": "gas", "aspiration": "std", "doornumber": "two", "carbody": "convertible", "drivewheel": "rwd", "enginelocation": "front", "wheelbase": 88.6, "carlength": 168.8, "carwidth": 64.1, "carheight": 48.8, "curbweight": 2548, "enginetype": "dohc", "cylindernumber": "four", "enginesize": 130, "fuelsystem": "mpfi", "boreratio": 3.47, "stroke": 2.68, "...": "and 6 more columns" } { "car_ID": 2, "symboling": 3, "CarName": "alfa-romero stelvio", "fueltype": "gas", "aspiration": "std", "doornumber": "two", "carbody": "convertible", "drivewheel": "rwd", "enginelocation": "front", "wheelbase": 88.6, "carlength": 168.8, "carwidth": 64.1, "carheight": 48.8, "curbweight": 2548, "enginetype": "dohc", "cylindernumber": "four", "enginesize": 130, "fuelsystem": "mpfi", "boreratio": 3.47, 
"stroke": 2.68, "...": "and 6 more columns" } { "car_ID": 3, "symboling": 1, "CarName": "alfa-romero Quadrifoglio", "fueltype": "gas", "aspiration": "std", "doornumber": "two", "carbody": "hatchback", "drivewheel": "rwd", "enginelocation": "front", "wheelbase": 94.5, "carlength": 171.2, "carwidth": 65.5, "carheight": 52.4, "curbweight": 2823, "enginetype": "ohcv", "cylindernumber": "six", "enginesize": 152, "fuelsystem": "mpfi", "boreratio": 2.68, "stroke": 3.47, "...": "and 6 more columns" } { "car_ID": 4, "symboling": 2, "CarName": "audi 100 ls", "fueltype": "gas", "aspiration": "std", "doornumber": "four", "carbody": "sedan", "drivewheel": "fwd", "enginelocation": "front", "wheelbase": 99.8, "carlength": 176.6, "carwidth": 66.2, "carheight": 54.3, "curbweight": 2337, "enginetype": "ohc", "cylindernumber": "four", "enginesize": 109, "fuelsystem": "mpfi", "boreratio": 3.19, "stroke": 3.4, "...": "and 6 more columns" } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session from statsmodels.stats.outliers_influence import variance_inflation_factor df = pd.read_csv("/kaggle/input/car-price-prediction/CarPrice_Assignment.csv") df.head() # ## Data Cleaning ### Checking for null data df.isnull().sum() # No Null data, so there is no need for value imputation # checking list of columns df.columns.to_list() # Dropping car_id since its unnecessary df = df.drop("car_ID", axis=1) # to check simple statistic print("the length of the data is : ", len(df)) df.describe() # combining carlength + carwidth + carheight as car_volume df["car_volume"] = df["carlength"] * df["carwidth"] * df["carheight"] df = df.drop(columns=["carlength", "carwidth", "carheight"]) # Removing Outliers numerical_columns = df.select_dtypes(exclude=["object"]).columns def remove_outliers(df, cols): Q1 = df[cols].quantile(0.25) Q3 = df[cols].quantile(0.75) IQR = Q3 - Q1 lower_bound = Q1 - 1.5 * IQR upper_bound = Q3 + 1.5 * IQR return df[~((df[cols] < lower_bound) | (df[cols] > upper_bound)).any(axis=1)] df = remove_outliers(df, numerical_columns) df_copy = df # ### Categorical Variable ## Working with categorical columns ## CarName ## df.select_dtypes(include=["object"]) print(df["CarName"].value_counts()) print("\n ") print( " We have 102 variation for 139 dataset. 
It seems that CarName format is Car_Brand *space* Car_type We will be using Car Brand instead of car type to gave more generalized view" ) df["CarBrand"] = df["CarName"].str.split(" ").str[0] ##droping the CarName Columns df = df.drop(columns=["CarName"], axis=1) ## fueltype print(df["fueltype"].value_counts()) print(" \n") print( "It seems that the option always gas, and hence there is no other variation and other distinguishable categories, hence we will drop the columns" ) df = df.drop(columns=["fueltype"], axis=1) ##aspiration print(df["aspiration"].value_counts()) ## we will use get_dummies for this ## door_number door_number_replacement = {"two": 2, "four": 4} df = df.replace({"door_number": door_number_replacement}) print("we are replacing door because it can be represented as number /") ##carbody df["carbody"].unique() ## we will use dummies for this df["drivewheel"].unique() ## we will use dummies for this print(df["enginelocation"].unique()) df = df.drop(columns=["enginelocation"], axis=1) print(" \n") print( "We will drop this because no other variation and other distinguishable categories, hence we will drop the columns" ) print(df["enginetype"].unique()) # we will use one_hot_encoding for this print(df["cylindernumber"].unique()) cylindernumber_replacement = {"four": 4, "six": 6, "five": 5, "two": 2} df = df.replace({"cylindernumber": cylindernumber_replacement}) print("\n") print("We replace cylindernumber from string to integer") print(df["fuelsystem"].unique()) # we willuse dummies for this # Final dataset df.head() # ## VIF Checking numeric = df.select_dtypes(exclude=["object"]) numeric = numeric.drop("price", axis=1) dropped_vars = [] for i in range(14): vif = pd.DataFrame() vif["Variable"] = numeric.columns vif["VIF"] = [ variance_inflation_factor(numeric.values, i) for i in range(numeric.shape[1]) ] if (vif["VIF"].max()) >= 10: new_columns = ( vif.sort_values(by="VIF", ascending=False)[1:]["Variable"].unique().tolist() ) print(vif.sort_values(by="VIF", ascending=False)) print("\n \n \n \n \n ") dropped_vars.append(vif.sort_values(by="VIF", ascending=False).iloc[0, 0]) numeric = numeric[new_columns] final_columns = vif["Variable"].unique().tolist() print(vif) df = df.select_dtypes(include=["object"]).join(df[final_columns]) df = df.join(df_copy["price"]) ##get_dummies for categorical variabel categorical_columns = df.select_dtypes(include=["object"]).columns for i in categorical_columns: dummies_creator = pd.get_dummies(df[i], drop_first=True, prefix=i) df = df.join(dummies_creator) df = df.drop(columns=[i]) # ### Min_Max_Scaling def build_min_max_scaler(df): scaled_columns = pd.DataFrame() for i in df.columns: min = df[i].min() df[i] = df[i] - min max = df[i].max() scaled_columns[i] = pd.DataFrame(round(df[i] / max, 5)) return scaled_columns df_new = build_min_max_scaler(df.drop("price", axis=1)) df = df_new.join(df["price"]) # # Building Model from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error, mean_absolute_error import statsmodels.api as sm X = df.drop("price", axis=1) y = df["price"] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=15062001 ) X = sm.add_constant(X_train) model = sm.OLS(y_train, X_train) results = model.fit() # Print the summary of the regression results summary = results.summary() print(results.summary()) y_pred = results.predict(X_test) mse = mean_absolute_error(y_test, y_pred) mpe = abs(np.mean((y_test - y_pred) 
/ y_test)) * 100 print("Mean Absolute Error :", mse) print("Mean Percentage Error : ", mpe) tables = pd.read_html(summary.tables[1].as_html(), header=0, index_col=0) coefficients_table = tables[0] sorted_table = coefficients_table.sort_values(by="P>|t|", ascending=False) pd.DataFrame(sorted_table)
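# For intuition (not part of the original notebook): the VIF used above is defined as VIF_j = 1 / (1 - R_j^2), where R_j^2 is the R-squared from regressing feature j on the remaining features. The sketch below reproduces that calculation for a single column; it assumes the `numeric` DataFrame from the VIF loop is still in scope, the first column is chosen purely for illustration, and an intercept is added here, so the value can differ slightly from variance_inflation_factor, which regresses on the raw columns as given.
check_col = numeric.columns[0]
others = sm.add_constant(numeric.drop(columns=[check_col]))
r_squared = sm.OLS(numeric[check_col], others).fit().rsquared
print(f"Manual VIF for {check_col}:", 1.0 / (1.0 - r_squared))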
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/684/129684411.ipynb
car-price-prediction
hellbuoy
[{"Id": 129684411, "ScriptId": 38556113, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5162494, "CreationDate": "05/15/2023 18:15:17", "VersionNumber": 4.0, "Title": "Multiple Linear Regression", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 227.0, "LinesInsertedFromPrevious": 53.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 174.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 186006664, "KernelVersionId": 129684411, "SourceDatasetVersionId": 741735}]
[{"Id": 741735, "DatasetId": 383055, "DatasourceVersionId": 762363, "CreatorUserId": 2318606, "LicenseName": "Unknown", "CreationDate": "10/15/2019 16:45:27", "VersionNumber": 1.0, "Title": "Car Price Prediction Multiple Linear Regression", "Slug": "car-price-prediction", "Subtitle": "Predicting the Prices of cars using RFE and VIF", "Description": "### Problem Statement\n\nA Chinese automobile company Geely Auto aspires to enter the US market by setting up their manufacturing unit there and producing cars locally to give competition to their US and European counterparts. \n\n \n\nThey have contracted an automobile consulting company to understand the factors on which the pricing of cars depends. Specifically, they want to understand the factors affecting the pricing of cars in the American market, since those may be very different from the Chinese market. The company wants to know:\n\nWhich variables are significant in predicting the price of a car\nHow well those variables describe the price of a car\nBased on various market surveys, the consulting firm has gathered a large data set of different types of cars across the America market. \n\n\n### Business Goal\n\nWe are required to model the price of cars with the available independent variables. It will be used by the management to understand how exactly the prices vary with the independent variables. They can accordingly manipulate the design of the cars, the business strategy etc. to meet certain price levels. Further, the model will be a good way for management to understand the pricing dynamics of a new market. \n\n### Please Note : The dataset provided is for learning purpose. Please don\u2019t draw any inference with real world scenario.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 383055, "CreatorUserId": 2318606, "OwnerUserId": 2318606.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 741735.0, "CurrentDatasourceVersionId": 762363.0, "ForumId": 395004, "Type": 2, "CreationDate": "10/15/2019 16:45:27", "LastActivityDate": "10/15/2019", "TotalViews": 339360, "TotalDownloads": 50133, "TotalVotes": 491, "TotalKernels": 345}]
[{"Id": 2318606, "UserName": "hellbuoy", "DisplayName": "Manish Kumar", "RegisterDate": "10/03/2018", "PerformanceTier": 2}]
[{"car-price-prediction/CarPrice_Assignment.csv": {"column_names": "[\"car_ID\", \"symboling\", \"CarName\", \"fueltype\", \"aspiration\", \"doornumber\", \"carbody\", \"drivewheel\", \"enginelocation\", \"wheelbase\", \"carlength\", \"carwidth\", \"carheight\", \"curbweight\", \"enginetype\", \"cylindernumber\", \"enginesize\", \"fuelsystem\", \"boreratio\", \"stroke\", \"compressionratio\", \"horsepower\", \"peakrpm\", \"citympg\", \"highwaympg\", \"price\"]", "column_data_types": "{\"car_ID\": \"int64\", \"symboling\": \"int64\", \"CarName\": \"object\", \"fueltype\": \"object\", \"aspiration\": \"object\", \"doornumber\": \"object\", \"carbody\": \"object\", \"drivewheel\": \"object\", \"enginelocation\": \"object\", \"wheelbase\": \"float64\", \"carlength\": \"float64\", \"carwidth\": \"float64\", \"carheight\": \"float64\", \"curbweight\": \"int64\", \"enginetype\": \"object\", \"cylindernumber\": \"object\", \"enginesize\": \"int64\", \"fuelsystem\": \"object\", \"boreratio\": \"float64\", \"stroke\": \"float64\", \"compressionratio\": \"float64\", \"horsepower\": \"int64\", \"peakrpm\": \"int64\", \"citympg\": \"int64\", \"highwaympg\": \"int64\", \"price\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 205 entries, 0 to 204\nData columns (total 26 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 car_ID 205 non-null int64 \n 1 symboling 205 non-null int64 \n 2 CarName 205 non-null object \n 3 fueltype 205 non-null object \n 4 aspiration 205 non-null object \n 5 doornumber 205 non-null object \n 6 carbody 205 non-null object \n 7 drivewheel 205 non-null object \n 8 enginelocation 205 non-null object \n 9 wheelbase 205 non-null float64\n 10 carlength 205 non-null float64\n 11 carwidth 205 non-null float64\n 12 carheight 205 non-null float64\n 13 curbweight 205 non-null int64 \n 14 enginetype 205 non-null object \n 15 cylindernumber 205 non-null object \n 16 enginesize 205 non-null int64 \n 17 fuelsystem 205 non-null object \n 18 boreratio 205 non-null float64\n 19 stroke 205 non-null float64\n 20 compressionratio 205 non-null float64\n 21 horsepower 205 non-null int64 \n 22 peakrpm 205 non-null int64 \n 23 citympg 205 non-null int64 \n 24 highwaympg 205 non-null int64 \n 25 price 205 non-null float64\ndtypes: float64(8), int64(8), object(10)\nmemory usage: 41.8+ KB\n", "summary": "{\"car_ID\": {\"count\": 205.0, \"mean\": 103.0, \"std\": 59.32256456582661, \"min\": 1.0, \"25%\": 52.0, \"50%\": 103.0, \"75%\": 154.0, \"max\": 205.0}, \"symboling\": {\"count\": 205.0, \"mean\": 0.8341463414634146, \"std\": 1.2453068281055297, \"min\": -2.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 3.0}, \"wheelbase\": {\"count\": 205.0, \"mean\": 98.75658536585367, \"std\": 6.021775685025571, \"min\": 86.6, \"25%\": 94.5, \"50%\": 97.0, \"75%\": 102.4, \"max\": 120.9}, \"carlength\": {\"count\": 205.0, \"mean\": 174.04926829268288, \"std\": 12.33728852655518, \"min\": 141.1, \"25%\": 166.3, \"50%\": 173.2, \"75%\": 183.1, \"max\": 208.1}, \"carwidth\": {\"count\": 205.0, \"mean\": 65.90780487804878, \"std\": 2.145203852687183, \"min\": 60.3, \"25%\": 64.1, \"50%\": 65.5, \"75%\": 66.9, \"max\": 72.3}, \"carheight\": {\"count\": 205.0, \"mean\": 53.72487804878049, \"std\": 2.4435219699049036, \"min\": 47.8, \"25%\": 52.0, \"50%\": 54.1, \"75%\": 55.5, \"max\": 59.8}, \"curbweight\": {\"count\": 205.0, \"mean\": 2555.5658536585365, \"std\": 520.6802035016387, \"min\": 1488.0, \"25%\": 2145.0, \"50%\": 2414.0, \"75%\": 2935.0, \"max\": 
4066.0}, \"enginesize\": {\"count\": 205.0, \"mean\": 126.90731707317073, \"std\": 41.64269343817984, \"min\": 61.0, \"25%\": 97.0, \"50%\": 120.0, \"75%\": 141.0, \"max\": 326.0}, \"boreratio\": {\"count\": 205.0, \"mean\": 3.329756097560975, \"std\": 0.27084370542622926, \"min\": 2.54, \"25%\": 3.15, \"50%\": 3.31, \"75%\": 3.58, \"max\": 3.94}, \"stroke\": {\"count\": 205.0, \"mean\": 3.255414634146341, \"std\": 0.31359701376080407, \"min\": 2.07, \"25%\": 3.11, \"50%\": 3.29, \"75%\": 3.41, \"max\": 4.17}, \"compressionratio\": {\"count\": 205.0, \"mean\": 10.142536585365855, \"std\": 3.972040321863298, \"min\": 7.0, \"25%\": 8.6, \"50%\": 9.0, \"75%\": 9.4, \"max\": 23.0}, \"horsepower\": {\"count\": 205.0, \"mean\": 104.1170731707317, \"std\": 39.54416680936116, \"min\": 48.0, \"25%\": 70.0, \"50%\": 95.0, \"75%\": 116.0, \"max\": 288.0}, \"peakrpm\": {\"count\": 205.0, \"mean\": 5125.121951219512, \"std\": 476.98564305694634, \"min\": 4150.0, \"25%\": 4800.0, \"50%\": 5200.0, \"75%\": 5500.0, \"max\": 6600.0}, \"citympg\": {\"count\": 205.0, \"mean\": 25.21951219512195, \"std\": 6.542141653001622, \"min\": 13.0, \"25%\": 19.0, \"50%\": 24.0, \"75%\": 30.0, \"max\": 49.0}, \"highwaympg\": {\"count\": 205.0, \"mean\": 30.75121951219512, \"std\": 6.886443130941824, \"min\": 16.0, \"25%\": 25.0, \"50%\": 30.0, \"75%\": 34.0, \"max\": 54.0}, \"price\": {\"count\": 205.0, \"mean\": 13276.710570731706, \"std\": 7988.85233174315, \"min\": 5118.0, \"25%\": 7788.0, \"50%\": 10295.0, \"75%\": 16503.0, \"max\": 45400.0}}", "examples": "{\"car_ID\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"symboling\":{\"0\":3,\"1\":3,\"2\":1,\"3\":2},\"CarName\":{\"0\":\"alfa-romero giulia\",\"1\":\"alfa-romero stelvio\",\"2\":\"alfa-romero Quadrifoglio\",\"3\":\"audi 100 ls\"},\"fueltype\":{\"0\":\"gas\",\"1\":\"gas\",\"2\":\"gas\",\"3\":\"gas\"},\"aspiration\":{\"0\":\"std\",\"1\":\"std\",\"2\":\"std\",\"3\":\"std\"},\"doornumber\":{\"0\":\"two\",\"1\":\"two\",\"2\":\"two\",\"3\":\"four\"},\"carbody\":{\"0\":\"convertible\",\"1\":\"convertible\",\"2\":\"hatchback\",\"3\":\"sedan\"},\"drivewheel\":{\"0\":\"rwd\",\"1\":\"rwd\",\"2\":\"rwd\",\"3\":\"fwd\"},\"enginelocation\":{\"0\":\"front\",\"1\":\"front\",\"2\":\"front\",\"3\":\"front\"},\"wheelbase\":{\"0\":88.6,\"1\":88.6,\"2\":94.5,\"3\":99.8},\"carlength\":{\"0\":168.8,\"1\":168.8,\"2\":171.2,\"3\":176.6},\"carwidth\":{\"0\":64.1,\"1\":64.1,\"2\":65.5,\"3\":66.2},\"carheight\":{\"0\":48.8,\"1\":48.8,\"2\":52.4,\"3\":54.3},\"curbweight\":{\"0\":2548,\"1\":2548,\"2\":2823,\"3\":2337},\"enginetype\":{\"0\":\"dohc\",\"1\":\"dohc\",\"2\":\"ohcv\",\"3\":\"ohc\"},\"cylindernumber\":{\"0\":\"four\",\"1\":\"four\",\"2\":\"six\",\"3\":\"four\"},\"enginesize\":{\"0\":130,\"1\":130,\"2\":152,\"3\":109},\"fuelsystem\":{\"0\":\"mpfi\",\"1\":\"mpfi\",\"2\":\"mpfi\",\"3\":\"mpfi\"},\"boreratio\":{\"0\":3.47,\"1\":3.47,\"2\":2.68,\"3\":3.19},\"stroke\":{\"0\":2.68,\"1\":2.68,\"2\":3.47,\"3\":3.4},\"compressionratio\":{\"0\":9.0,\"1\":9.0,\"2\":9.0,\"3\":10.0},\"horsepower\":{\"0\":111,\"1\":111,\"2\":154,\"3\":102},\"peakrpm\":{\"0\":5000,\"1\":5000,\"2\":5000,\"3\":5500},\"citympg\":{\"0\":21,\"1\":21,\"2\":19,\"3\":24},\"highwaympg\":{\"0\":27,\"1\":27,\"2\":26,\"3\":30},\"price\":{\"0\":13495.0,\"1\":16500.0,\"2\":16500.0,\"3\":13950.0}}"}}]
true
1
<start_data_description><data_path>car-price-prediction/CarPrice_Assignment.csv: <column_names> ['car_ID', 'symboling', 'CarName', 'fueltype', 'aspiration', 'doornumber', 'carbody', 'drivewheel', 'enginelocation', 'wheelbase', 'carlength', 'carwidth', 'carheight', 'curbweight', 'enginetype', 'cylindernumber', 'enginesize', 'fuelsystem', 'boreratio', 'stroke', 'compressionratio', 'horsepower', 'peakrpm', 'citympg', 'highwaympg', 'price'] <column_types> {'car_ID': 'int64', 'symboling': 'int64', 'CarName': 'object', 'fueltype': 'object', 'aspiration': 'object', 'doornumber': 'object', 'carbody': 'object', 'drivewheel': 'object', 'enginelocation': 'object', 'wheelbase': 'float64', 'carlength': 'float64', 'carwidth': 'float64', 'carheight': 'float64', 'curbweight': 'int64', 'enginetype': 'object', 'cylindernumber': 'object', 'enginesize': 'int64', 'fuelsystem': 'object', 'boreratio': 'float64', 'stroke': 'float64', 'compressionratio': 'float64', 'horsepower': 'int64', 'peakrpm': 'int64', 'citympg': 'int64', 'highwaympg': 'int64', 'price': 'float64'} <dataframe_Summary> {'car_ID': {'count': 205.0, 'mean': 103.0, 'std': 59.32256456582661, 'min': 1.0, '25%': 52.0, '50%': 103.0, '75%': 154.0, 'max': 205.0}, 'symboling': {'count': 205.0, 'mean': 0.8341463414634146, 'std': 1.2453068281055297, 'min': -2.0, '25%': 0.0, '50%': 1.0, '75%': 2.0, 'max': 3.0}, 'wheelbase': {'count': 205.0, 'mean': 98.75658536585367, 'std': 6.021775685025571, 'min': 86.6, '25%': 94.5, '50%': 97.0, '75%': 102.4, 'max': 120.9}, 'carlength': {'count': 205.0, 'mean': 174.04926829268288, 'std': 12.33728852655518, 'min': 141.1, '25%': 166.3, '50%': 173.2, '75%': 183.1, 'max': 208.1}, 'carwidth': {'count': 205.0, 'mean': 65.90780487804878, 'std': 2.145203852687183, 'min': 60.3, '25%': 64.1, '50%': 65.5, '75%': 66.9, 'max': 72.3}, 'carheight': {'count': 205.0, 'mean': 53.72487804878049, 'std': 2.4435219699049036, 'min': 47.8, '25%': 52.0, '50%': 54.1, '75%': 55.5, 'max': 59.8}, 'curbweight': {'count': 205.0, 'mean': 2555.5658536585365, 'std': 520.6802035016387, 'min': 1488.0, '25%': 2145.0, '50%': 2414.0, '75%': 2935.0, 'max': 4066.0}, 'enginesize': {'count': 205.0, 'mean': 126.90731707317073, 'std': 41.64269343817984, 'min': 61.0, '25%': 97.0, '50%': 120.0, '75%': 141.0, 'max': 326.0}, 'boreratio': {'count': 205.0, 'mean': 3.329756097560975, 'std': 0.27084370542622926, 'min': 2.54, '25%': 3.15, '50%': 3.31, '75%': 3.58, 'max': 3.94}, 'stroke': {'count': 205.0, 'mean': 3.255414634146341, 'std': 0.31359701376080407, 'min': 2.07, '25%': 3.11, '50%': 3.29, '75%': 3.41, 'max': 4.17}, 'compressionratio': {'count': 205.0, 'mean': 10.142536585365855, 'std': 3.972040321863298, 'min': 7.0, '25%': 8.6, '50%': 9.0, '75%': 9.4, 'max': 23.0}, 'horsepower': {'count': 205.0, 'mean': 104.1170731707317, 'std': 39.54416680936116, 'min': 48.0, '25%': 70.0, '50%': 95.0, '75%': 116.0, 'max': 288.0}, 'peakrpm': {'count': 205.0, 'mean': 5125.121951219512, 'std': 476.98564305694634, 'min': 4150.0, '25%': 4800.0, '50%': 5200.0, '75%': 5500.0, 'max': 6600.0}, 'citympg': {'count': 205.0, 'mean': 25.21951219512195, 'std': 6.542141653001622, 'min': 13.0, '25%': 19.0, '50%': 24.0, '75%': 30.0, 'max': 49.0}, 'highwaympg': {'count': 205.0, 'mean': 30.75121951219512, 'std': 6.886443130941824, 'min': 16.0, '25%': 25.0, '50%': 30.0, '75%': 34.0, 'max': 54.0}, 'price': {'count': 205.0, 'mean': 13276.710570731706, 'std': 7988.85233174315, 'min': 5118.0, '25%': 7788.0, '50%': 10295.0, '75%': 16503.0, 'max': 45400.0}} <dataframe_info> RangeIndex: 205 entries, 0 to 204 
Data columns (total 26 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 car_ID 205 non-null int64 1 symboling 205 non-null int64 2 CarName 205 non-null object 3 fueltype 205 non-null object 4 aspiration 205 non-null object 5 doornumber 205 non-null object 6 carbody 205 non-null object 7 drivewheel 205 non-null object 8 enginelocation 205 non-null object 9 wheelbase 205 non-null float64 10 carlength 205 non-null float64 11 carwidth 205 non-null float64 12 carheight 205 non-null float64 13 curbweight 205 non-null int64 14 enginetype 205 non-null object 15 cylindernumber 205 non-null object 16 enginesize 205 non-null int64 17 fuelsystem 205 non-null object 18 boreratio 205 non-null float64 19 stroke 205 non-null float64 20 compressionratio 205 non-null float64 21 horsepower 205 non-null int64 22 peakrpm 205 non-null int64 23 citympg 205 non-null int64 24 highwaympg 205 non-null int64 25 price 205 non-null float64 dtypes: float64(8), int64(8), object(10) memory usage: 41.8+ KB <some_examples> {'car_ID': {'0': 1, '1': 2, '2': 3, '3': 4}, 'symboling': {'0': 3, '1': 3, '2': 1, '3': 2}, 'CarName': {'0': 'alfa-romero giulia', '1': 'alfa-romero stelvio', '2': 'alfa-romero Quadrifoglio', '3': 'audi 100 ls'}, 'fueltype': {'0': 'gas', '1': 'gas', '2': 'gas', '3': 'gas'}, 'aspiration': {'0': 'std', '1': 'std', '2': 'std', '3': 'std'}, 'doornumber': {'0': 'two', '1': 'two', '2': 'two', '3': 'four'}, 'carbody': {'0': 'convertible', '1': 'convertible', '2': 'hatchback', '3': 'sedan'}, 'drivewheel': {'0': 'rwd', '1': 'rwd', '2': 'rwd', '3': 'fwd'}, 'enginelocation': {'0': 'front', '1': 'front', '2': 'front', '3': 'front'}, 'wheelbase': {'0': 88.6, '1': 88.6, '2': 94.5, '3': 99.8}, 'carlength': {'0': 168.8, '1': 168.8, '2': 171.2, '3': 176.6}, 'carwidth': {'0': 64.1, '1': 64.1, '2': 65.5, '3': 66.2}, 'carheight': {'0': 48.8, '1': 48.8, '2': 52.4, '3': 54.3}, 'curbweight': {'0': 2548, '1': 2548, '2': 2823, '3': 2337}, 'enginetype': {'0': 'dohc', '1': 'dohc', '2': 'ohcv', '3': 'ohc'}, 'cylindernumber': {'0': 'four', '1': 'four', '2': 'six', '3': 'four'}, 'enginesize': {'0': 130, '1': 130, '2': 152, '3': 109}, 'fuelsystem': {'0': 'mpfi', '1': 'mpfi', '2': 'mpfi', '3': 'mpfi'}, 'boreratio': {'0': 3.47, '1': 3.47, '2': 2.68, '3': 3.19}, 'stroke': {'0': 2.68, '1': 2.68, '2': 3.47, '3': 3.4}, 'compressionratio': {'0': 9.0, '1': 9.0, '2': 9.0, '3': 10.0}, 'horsepower': {'0': 111, '1': 111, '2': 154, '3': 102}, 'peakrpm': {'0': 5000, '1': 5000, '2': 5000, '3': 5500}, 'citympg': {'0': 21, '1': 21, '2': 19, '3': 24}, 'highwaympg': {'0': 27, '1': 27, '2': 26, '3': 30}, 'price': {'0': 13495.0, '1': 16500.0, '2': 16500.0, '3': 13950.0}} <end_description>
1,933
1
3,641
1,933
129684500
<jupyter_start><jupyter_text>Bank Customer Churn RowNumber—corresponds to the record (row) number and has no effect on the output. CustomerId—contains random values and has no effect on customer leaving the bank. Surname—the surname of a customer has no impact on their decision to leave the bank. CreditScore—can have an effect on customer churn, since a customer with a higher credit score is less likely to leave the bank. Geography—a customer’s location can affect their decision to leave the bank. Gender—it’s interesting to explore whether gender plays a role in a customer leaving the bank. Age—this is certainly relevant, since older customers are less likely to leave their bank than younger ones. Tenure—refers to the number of years that the customer has been a client of the bank. Normally, older clients are more loyal and less likely to leave a bank. Balance—also a very good indicator of customer churn, as people with a higher balance in their accounts are less likely to leave the bank compared to those with lower balances. NumOfProducts—refers to the number of products that a customer has purchased through the bank. HasCrCard—denotes whether or not a customer has a credit card. This column is also relevant, since people with a credit card are less likely to leave the bank. IsActiveMember—active customers are less likely to leave the bank. EstimatedSalary—as with balance, people with lower salaries are more likely to leave the bank compared to those with higher salaries. Exited—whether or not the customer left the bank. Complain—customer has complaint or not. Satisfaction Score—Score provided by the customer for their complaint resolution. Card Type—type of card hold by the customer. Points Earned—the points earned by the customer for using credit card. Acknowledgements As we know, it is much more expensive to sign in a new client than keeping an existing one. It is advantageous for banks to know what leads a client towards the decision to leave the company. Churn prevention allows companies to develop loyalty programs and retention campaigns to keep as many customers as possible. Kaggle dataset identifier: bank-customer-churn <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import matplotlib.pyplot as plt import seaborn as sns df = pd.read_csv("/kaggle/input/bank-customer-churn/Customer-Churn-Records.csv") df.head() df = df.drop(["RowNumber", "CustomerId", "Surname"], axis=1) df.shape df.isnull().sum() df["Gender"].unique() df["Card Type"].unique() # ***Categorical feautures are:*** # 1. Gender # 2. HasCrCard # 3. IsActiveMember # 4. Exited # 5. Complain # 6. 
Card Type df1 = df.groupby("Complain")["Exited"].apply(lambda x: (x == 1).mean()).reset_index() df1 = df1.sort_values("Exited", ascending=False) sns.barplot(data=df1, x="Complain", y="Exited", order=df1.Complain, color="#FF8C01") plt.xlabel("Has complained or not") plt.ylabel("Proportion of Customer Churn") plt.title("Likelihood of Customer Churn by Complaint History") plt.show() df1 = df.groupby("HasCrCard")["Exited"].apply(lambda x: (x == 1).mean()).reset_index() df1 = df1.sort_values("Exited", ascending=False) sns.barplot(data=df1, x="HasCrCard", y="Exited", order=df1.HasCrCard, color="#FF8C01") plt.xlabel("Has Credit Card or not") plt.ylabel("Proportion of Customer Churn") plt.title("Likelihood of Customer Churn by Credit Card Holder or Not") plt.show() df1 = ( df.groupby("IsActiveMember")["Exited"] .apply(lambda x: (x == 1).mean()) .reset_index() ) df1 = df1.sort_values("Exited", ascending=False) sns.barplot( data=df1, x="IsActiveMember", y="Exited", order=df1.IsActiveMember, color="#FF8C01" ) plt.xlabel("Is Active Member or Not") plt.ylabel("Proportion of Customer Churn") plt.title("Likelihood of Customer Churn by Activity of the Customer") plt.show() # The plot shows that inactive customers have a higher tendency to leave the bank than active ones. # Card Type df1 = df.groupby("Card Type")["Exited"].apply(lambda x: (x == 1).mean()).reset_index() df1 = df1.sort_values("Card Type", ascending=False) sns.barplot( data=df1, x="Card Type", y="Exited", order=df1["Card Type"], color="#FF8C01" ) plt.xlabel("Type of Card Holder") plt.ylabel("Proportion of Customer Churn") plt.title("Likelihood of Customer Churn by Credit Card Type") plt.show()
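# A small helper (not in the original notebook) that generalizes the repeated group-by-and-average pattern above; the function name and the example column are assumptions for illustration only.
def churn_rate_by(frame, column):
    """Return the proportion of churned customers for each category of `column`."""
    return (
        frame.groupby(column)["Exited"]
        .mean()  # Exited is 0/1, so the mean is the churn proportion
        .sort_values(ascending=False)
        .reset_index(name="ChurnRate")
    )

# Example usage: churn rate by customer location
print(churn_rate_by(df, "Geography"))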
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/684/129684500.ipynb
bank-customer-churn
radheshyamkollipara
[{"Id": 129684500, "ScriptId": 38564486, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8103914, "CreationDate": "05/15/2023 18:16:15", "VersionNumber": 1.0, "Title": "notebookb3e9a432fe", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 97.0, "LinesInsertedFromPrevious": 97.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186006829, "KernelVersionId": 129684500, "SourceDatasetVersionId": 5550559}]
[{"Id": 5550559, "DatasetId": 3197960, "DatasourceVersionId": 5625285, "CreatorUserId": 14862076, "LicenseName": "Other (specified in description)", "CreationDate": "04/28/2023 16:32:01", "VersionNumber": 1.0, "Title": "Bank Customer Churn", "Slug": "bank-customer-churn", "Subtitle": "Bank Customer Data for Customer Churn", "Description": "RowNumber\u2014corresponds to the record (row) number and has no effect on the output.\nCustomerId\u2014contains random values and has no effect on customer leaving the bank.\nSurname\u2014the surname of a customer has no impact on their decision to leave the bank.\nCreditScore\u2014can have an effect on customer churn, since a customer with a higher credit score is less likely to leave the bank.\nGeography\u2014a customer\u2019s location can affect their decision to leave the bank.\nGender\u2014it\u2019s interesting to explore whether gender plays a role in a customer leaving the bank.\nAge\u2014this is certainly relevant, since older customers are less likely to leave their bank than younger ones.\nTenure\u2014refers to the number of years that the customer has been a client of the bank. Normally, older clients are more loyal and less likely to leave a bank.\nBalance\u2014also a very good indicator of customer churn, as people with a higher balance in their accounts are less likely to leave the bank compared to those with lower balances.\nNumOfProducts\u2014refers to the number of products that a customer has purchased through the bank.\nHasCrCard\u2014denotes whether or not a customer has a credit card. This column is also relevant, since people with a credit card are less likely to leave the bank.\nIsActiveMember\u2014active customers are less likely to leave the bank.\nEstimatedSalary\u2014as with balance, people with lower salaries are more likely to leave the bank compared to those with higher salaries.\nExited\u2014whether or not the customer left the bank.\nComplain\u2014customer has complaint or not.\nSatisfaction Score\u2014Score provided by the customer for their complaint resolution.\nCard Type\u2014type of card hold by the customer.\nPoints Earned\u2014the points earned by the customer for using credit card.\n\nAcknowledgements\n\nAs we know, it is much more expensive to sign in a new client than keeping an existing one.\n\nIt is advantageous for banks to know what leads a client towards the decision to leave the company.\n\nChurn prevention allows companies to develop loyalty programs and retention campaigns to keep as many customers as possible.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3197960, "CreatorUserId": 14862076, "OwnerUserId": 14862076.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5550559.0, "CurrentDatasourceVersionId": 5625285.0, "ForumId": 3262570, "Type": 2, "CreationDate": "04/28/2023 16:32:01", "LastActivityDate": "04/28/2023", "TotalViews": 39315, "TotalDownloads": 6814, "TotalVotes": 97, "TotalKernels": 52}]
[{"Id": 14862076, "UserName": "radheshyamkollipara", "DisplayName": "Radheshyam Kollipara", "RegisterDate": "04/28/2023", "PerformanceTier": 0}]
false
1
916
0
1,417
916
129684606
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Visualize the data data = pd.read_csv( r"/kaggle/input/combine-all-the-data-from-lte-dataset-into-one/LTE combined data.csv" ) data.head() data["path"] = data["path"].replace( {"bus": 0, "car": 1, "pedestrian": 2, "static": 3, "train": 4} ) data.head()
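# The notebook version is titled "LTE heat map", so a natural next step (not shown in the original code) is a correlation heatmap of the numeric columns, which now include the encoded `path` column. This is a minimal sketch with assumed plotting defaults; column names are not hard-coded because the dataset schema is not listed here.
import matplotlib.pyplot as plt
import seaborn as sns

numeric_data = data.select_dtypes(include="number")
plt.figure(figsize=(10, 8))
sns.heatmap(numeric_data.corr(), cmap="coolwarm", center=0)
plt.title("Correlation heatmap of numeric LTE features")
plt.show()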
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/684/129684606.ipynb
null
null
[{"Id": 129684606, "ScriptId": 38563297, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13315094, "CreationDate": "05/15/2023 18:17:20", "VersionNumber": 1.0, "Title": "LTE heat map", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 34.0, "LinesInsertedFromPrevious": 34.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
295
0
295
295
129684938
<jupyter_start><jupyter_text>Glassdoor- Analyze Gender Pay Gap ### Context Want to know the base pay for different job roles, then this data set will be useful. ### About the data set: The data set has been taken from glassdoor and focuses on income for various job titles based on gender. As there have been many studies showcasing that women are paid less than men for the same job titles, this data set will be helpful in identifying the depth of the gender-based pay gap. The features of the data set are: Job Title Gender Age PerfEval Education Dept Seniority Base Pay, and Bonus Kaggle dataset identifier: glassdoor-analyze-gender-pay-gap <jupyter_script># # Examination of the Gender Pay Gap # ### Summary: This notebook analyzes the Glassdoor data set, which contains a variety of variables related to gender and pay. # The data set was taken via Kaggle from the site Glassdoor. It focuses on income for various job titles, as well as listing bonuses and potential confounding factors such as age. As there have been many studies showcasing that women are paid less than men for the same job titles, this data set will be helpful in exploring factors relating to or discovering a gender-based pay gap. This data was last updated in 2020. # | Field | Description | # |-----------|-----------------------------------------------------| # | Job Title | Marketing Associate, Software Engineer, or "Other" | # | Gender | Male or Female | # | Age | Age in years | # | PerfEval | Performance Evaluation score | # | Education | Level of Education | # | Dept | Department | # | Seniority | Number of years worked | # | BasePay | Annual Basic Pay in $ | # | Bonus | Annual Bonus Pay | # ## Data Input & Cleaning import pandas as pd import matplotlib.pyplot as plt import statistics as stats import math pd.read_csv( "/kaggle/input/glassdoor-analyze-gender-pay-gap/Glassdoor Gender Pay Gap.csv" ) # ### Variable Nomenclature & Basic Dataset Understanding # To keep the analysis code concise, the data set 'Glassdoor Gender Pay Gap.csv' is loaded into a DataFrame referred to as gpg. filepath = "/kaggle/input/glassdoor-analyze-gender-pay-gap/Glassdoor Gender Pay Gap.csv" gpg = pd.read_csv(filepath) # Method to re-import data (for repeating the analysis) def import_gpg(): return pd.read_csv(filepath) # Show type: gpg now references an instance of a DataFrame that contains the contents of Glassdoor Gender Pay Gap. type(gpg) # Show the start of the dataset: gpg.head(10) # ### Inspecting the Gender Pay Gap DataFrame gpg.shape gpg.columns # .shape and .columns are updated later after adding a tenth column titled "Total Income" which combines BasePay with Bonus gpg.dtypes pd.DataFrame(gpg.dtypes, columns=["Data Type"]) gpg.describe() # # Missing Data gpg.isnull().head() # sum the number of missing data gpg.isnull().sum() # # Framing the Problem # ## What influence does Gender have on income? 
# After ingesting and cleaning the dataset, the following hypotheses are explored: # * Statistical Differences across Gender in Base Pay, Bonuses, and Total Income (both Base Pay and Bonuses) # * Whether performance evaluations show differences in Base Pay and Total Income when sorting based on Gender # * How differences in education level impact income (for both genders) # # Statistical Differences across Gender in Base Pay, Bonuses, and Total Income # ### Creating a New Column that Combines Base Pay and Bonus Pay into "Total Income" # gpg["TotalIncome"] = gpg["BasePay"] + gpg["Bonus"] gpg.tail() # ### Breakout of Data into two frames by gender female_data = gpg[gpg.Gender == "Female"].copy() female_data # Female BasePay and Bonus Statistics female_data[["Gender", "BasePay", "Bonus"]].describe().round(2) print(stats.median(female_data["BasePay"])) print(stats.median(female_data["Bonus"])) stats.median(female_data["TotalIncome"]) # Check Normality Assumption # # Histograms to understand the distributions # Calculate the number of bins: bin width = (max - min) / sqrt(n), i.e. about sqrt(n) bins, # where n = the number of observations (468 female records) math.sqrt(468) female_data["BasePay"].plot(kind="hist", bins=22) female_data["Bonus"].plot(kind="hist", bins=22) female_data["TotalIncome"].plot(kind="hist", bins=22) # ## Repeat for Male Data male_data = gpg[gpg.Gender == "Male"].copy() male_data # Male BasePay and Bonus Statistics male_data[["Gender", "BasePay", "Bonus"]].describe().round(2) print(stats.median(male_data["BasePay"])) print(stats.median(male_data["Bonus"])) stats.median(male_data["TotalIncome"]) math.sqrt(532) male_data["BasePay"].plot(kind="hist", bins=23) male_data["Bonus"].plot(kind="hist", bins=23) male_data["TotalIncome"].plot(kind="hist", bins=23) # # Gender's Impact on Base Pay, Bonus Pay, and Total Income # Assumption: Normality; checked graphically # * Median Base Pay appeared normally distributed; female bonuses were borderline non-normal, but a normal distribution still appeared to be the best fit # Median Base Pay: # * Females: 89,913 # * Males: 98,223 # Median Bonus: # * Females: 6,553 # * Males: 6,481 # Median Total Income: # * Females: 96,571 # * Males: 105,101 # # Whether performance evaluations show differences in Base Pay and Total Income when sorting based on Gender female_data.plot( kind="scatter", x="PerfEval", y="BasePay", figsize=(10, 6), s=15, c="pink" ) plt.ylabel("Base Pay") plt.xlabel("Performance Evaluation Rating") plt.title("Female Base Pay vs. Performance Rating") female_data.plot( kind="scatter", x="PerfEval", y="Bonus", figsize=(10, 6), s=15, c="pink" ) plt.ylabel("Bonus") plt.xlabel("Performance Evaluation Rating") plt.title("Female Bonus vs. Performance Rating") female_data.plot( kind="scatter", x="PerfEval", y="TotalIncome", figsize=(10, 6), s=15, c="pink" ) plt.ylabel("Total Income") plt.xlabel("Performance Evaluation Rating") plt.title("Female Total Income vs. Performance Rating") male_data.plot(kind="scatter", x="PerfEval", y="BasePay", figsize=(10, 6), s=15) plt.ylabel("Base Pay") plt.xlabel("Performance Evaluation Rating") plt.title("Male Base Pay vs. Performance Rating") male_data.plot(kind="scatter", x="PerfEval", y="Bonus", figsize=(10, 6), s=15) plt.ylabel("Bonus") plt.xlabel("Performance Evaluation Rating") plt.title("Male Bonus Pay vs. Performance Rating") male_data.plot(kind="scatter", x="PerfEval", y="TotalIncome", figsize=(10, 6), s=15) plt.ylabel("Total Income") plt.xlabel("Performance Evaluation Rating") plt.title("Male Total Income vs. 
Performance Rating") # ### Conclusions: # * Females and Males are very close across receiving bonus pay for good performance evaluations # * Both genders don’t seem to have a huge impact on base pay or total income across performance rating # # How differences in education level impact income (for both genders) # def as_million(value): return value / 1_000_000 test_income = ( gpg[["Education", "TotalIncome"]] .groupby("Education") .sum() .sort_values("TotalIncome", ascending=False) .head(100) .apply(as_million) .rename(columns={"TotalIncome": "Total Income"}) ) test_income.index.name = "Total Income" test_income test_income.plot(kind="bar", figsize=(11, 6)) plt.ylabel("Total Income in Millions") plt.xlabel("Education Level") plt.title("Total Income vs. Education Level") # ## Looking at Sample Size per Education Level gpg.Education.value_counts() gpg.Education.value_counts().plot(kind="bar")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/684/129684938.ipynb
glassdoor-analyze-gender-pay-gap
nilimajauhari
[{"Id": 129684938, "ScriptId": 38562710, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14142352, "CreationDate": "05/15/2023 18:20:43", "VersionNumber": 11.0, "Title": "Examination of the Gender Pay Gap", "EvaluationDate": "05/15/2023", "IsChange": false, "TotalLines": 218.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 218.0, "LinesInsertedFromFork": 107.0, "LinesDeletedFromFork": 87.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 111.0, "TotalVotes": 0}]
[{"Id": 186007441, "KernelVersionId": 129684938, "SourceDatasetVersionId": 1482109}]
[{"Id": 1482109, "DatasetId": 869852, "DatasourceVersionId": 1515965, "CreatorUserId": 2884552, "LicenseName": "Other (specified in description)", "CreationDate": "09/12/2020 10:29:56", "VersionNumber": 1.0, "Title": "Glassdoor- Analyze Gender Pay Gap", "Slug": "glassdoor-analyze-gender-pay-gap", "Subtitle": "Anlayze the Pay Gap for different job titles based on the gender", "Description": "### Context\n\nWant to know the base pay for different job roles, then this data set will be useful.\n\n\n### About the data set:\nThe data set has been taken from glassdoor and focuses on income for various job titles based on gender. As there have been many studies showcasing that women are paid less than men for the same job titles, this data set will be helpful in identifying the depth of the gender-based pay gap. The features of the data set are:\nJob Title\nGender\nAge\nPerfEval\nEducation\nDept \nSeniority\nBase Pay, and\nBonus\n\n\n\n### Acknowledgements\nThe data set has been taken from the website of Glassdoor. The license was not mentioned on the source.\n\n### Inspiration\nTo find out the pay gap between the gender for the same job title.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 869852, "CreatorUserId": 2884552, "OwnerUserId": 2884552.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1482109.0, "CurrentDatasourceVersionId": 1515965.0, "ForumId": 885237, "Type": 2, "CreationDate": "09/12/2020 10:29:56", "LastActivityDate": "09/12/2020", "TotalViews": 35813, "TotalDownloads": 4648, "TotalVotes": 98, "TotalKernels": 20}]
[{"Id": 2884552, "UserName": "nilimajauhari", "DisplayName": "Neelima Jauhari", "RegisterDate": "03/02/2019", "PerformanceTier": 2}]
# # Examination of the Gender Pay Gap # ### Summary: This notebook analyzes the Glassdoor data set, which contains a variety of variables related to gender and pay. # The data set was taken via Kaggle from the site Glassdoor. It focuses on income for various job titles, as well as listing bonuses and potential confounding factors such as age. As there have been many studies showcasing that women are paid less than men for the same job titles, this data set will be helpful in exploring factors relating to or discovering a gender-based pay gap. This data was last updated in 2020. # | Field | Description | # |-----------|-----------------------------------------------------| # | Job Title | Marketing Associate, Software Engineer, or "Other" | # | Gender | Male or Female | # | Age | Age in years | # | PerfEval | Performance Evaluation score | # | Education | Level of Education | # | Dept | Department | # | Seniority | Number of years worked | # | BasePay | Annual Basic Pay in $ | # | Bonus | Annual Bonus Pay | # ## Data Input & Cleaning import pandas as pd import matplotlib.pyplot as plt import statistics as stats import math as math pd.read_csv( "/kaggle/input/glassdoor-analyze-gender-pay-gap/Glassdoor Gender Pay Gap.csv" ) # ### Variable Nomenclature & Basic Dataset Understanding # In order to shorten the title of the data set to more readily use analysis tools, shorten 'Gender Pay Gap.csv' to gpg. filepath = "/kaggle/input/glassdoor-analyze-gender-pay-gap/Glassdoor Gender Pay Gap.csv" gpg = pd.read_csv(filepath) # Method to re-import data (for re-doing analysis) def import_gpg(): return pd.read_csv(filepath) # Show type: gpg now references an instance of a DataFrame that contains the contents of Glassdoor Gender Pay Gap. type(gpg) # Show the start of the dataset: gpg.head(10) # ### Inspecting the Gender Pay Gap DataFrame gpg.shape gpg.columns # .shape and .columns are updated later after adding a tenth column titled "Total Income" which combines BasePay with Bonus gpg.dtypes pd.DataFrame(gpg.dtypes, columns=["Data Type"]) gpg.describe() # # Missing Data gpg.isnull().head() # sum the number of missing data gpg.isnull().sum() # # Framing the Problem # ## What influence does Gender have on income? 
# After ingesting and cleaning the dataset, the following hypotheses are explored: # * Statistical Differences across Gender in Base Pay, Bonuses, and Total Income (both Base Pay and Bonuses) # * Whether performance evaluations show differences in Base Pay and Total Income when sorting based on Gender # * How differences in education level impact income (for both genders) # # Statistical Differences across Gender in Base Pay, Bonuses, and Total Income # ### Creating a New Column that Combines Base Pay and Bonus Pay into "Total Income" # gpg["TotalIncome"] = gpg["BasePay"] + gpg["Bonus"] gpg.tail() # ### Breakout of Data into two frames by gender female_data = gpg[gpg.Gender == "Female"].copy() female_data # Female BasePay and Bonus Statistics female_data[["Gender", "BasePay", "Bonus"]].describe().round(2) print(stats.median(female_data["BasePay"])) print(stats.median(female_data["Bonus"])) stats.median(female_data["TotalIncome"]) # Check Normality Assumption # #histograms to understand distribution # Calculate the number of bins: # number of bins k is roughly sqrt(n), bin width = (Max - Min) / k # where n = the number of observations math.sqrt(468) female_data["BasePay"].plot(kind="hist", bins=22) female_data["Bonus"].plot(kind="hist", bins=22) female_data["TotalIncome"].plot(kind="hist", bins=22) # ## Repeat for Male Data male_data = gpg[gpg.Gender == "Male"].copy() male_data # Male BasePay and Bonus Statistics male_data[["Gender", "BasePay", "Bonus"]].describe().round(2) print(stats.median(male_data["BasePay"])) print(stats.median(male_data["Bonus"])) stats.median(male_data["TotalIncome"]) math.sqrt(532) male_data["BasePay"].plot(kind="hist", bins=23) male_data["Bonus"].plot(kind="hist", bins=23) male_data["TotalIncome"].plot(kind="hist", bins=23) # # Gender's Impact on Base Pay, Bonus Pay, and Total Income # Assumption: Normality; checked graphically # * Base Pay appeared approximately normal; Bonuses for females were borderline non-normal but a normal distribution still appeared to be the best fit # Median Base Pay: # * Females: 89,913 # * Males: 98,223 # Median Bonus: # * Females: 6,553 # * Males: 6,481 # Median Total Income: # * Females: 96,571 # * Males: 105,101 # # Whether performance evaluations show differences in Base Pay and Total Income when sorting based on Gender female_data.plot( kind="scatter", x="PerfEval", y="BasePay", figsize=(10, 6), s=15, c="pink" ) plt.ylabel("Base Pay") plt.xlabel("Performance Evaluation Rating") plt.title("Female Base Pay vs. Performance Rating") female_data.plot( kind="scatter", x="PerfEval", y="Bonus", figsize=(10, 6), s=15, c="pink" ) plt.ylabel("Bonus") plt.xlabel("Performance Evaluation Rating") plt.title("Female Bonus vs. Performance Rating") female_data.plot( kind="scatter", x="PerfEval", y="TotalIncome", figsize=(10, 6), s=15, c="pink" ) plt.ylabel("Total Income") plt.xlabel("Performance Evaluation Rating") plt.title("Female Total Income vs. Performance Rating") male_data.plot(kind="scatter", x="PerfEval", y="BasePay", figsize=(10, 6), s=15) plt.ylabel("Base Pay") plt.xlabel("Performance Evaluation Rating") plt.title("Male Base Pay vs. Performance Rating") male_data.plot(kind="scatter", x="PerfEval", y="Bonus", figsize=(10, 6), s=15) plt.ylabel("Bonus") plt.xlabel("Performance Evaluation Rating") plt.title("Male Bonus Pay vs. Performance Rating") male_data.plot(kind="scatter", x="PerfEval", y="TotalIncome", figsize=(10, 6), s=15) plt.ylabel("Total Income") plt.xlabel("Performance Evaluation Rating") plt.title("Male Total Income vs. Performance Rating") 
# ### Conclusions: # * Females and males receive very similar bonus pay for good performance evaluations # * For both genders, performance rating does not appear to have a large impact on base pay or total income # # How differences in education level impact income (for both genders) # def as_million(value): return value / 1_000_000 test_income = ( gpg[["Education", "TotalIncome"]] .groupby("Education") .sum() .sort_values("TotalIncome", ascending=False) .head(100) .apply(as_million) .rename(columns={"TotalIncome": "Total Income"}) ) test_income.index.name = "Education" # the index holds education levels, not income test_income test_income.plot(kind="bar", figsize=(11, 6)) plt.ylabel("Total Income in Millions") plt.xlabel("Education Level") plt.title("Total Income vs. Education Level") # ## Looking at Sample Size per Education Level gpg.Education.value_counts() gpg.Education.value_counts().plot(kind="bar")
false
1
2,028
0
2,192
2,028
129895716
<jupyter_start><jupyter_text>Car Acceptability Classification Dataset &gt;Car Acceptability Classification Database was derived from a simple hierarchical decision model originally developed for the demonstration of DEX, M. Bohanec, V. Rajkovic: Expert system for decision making. Sistemica 1(1), pp. 145-157, 1990.). The model evaluates cars according to the following concept structure: `Car Acceptability Classification Dataset` &gt;1. **Buying_Price** - Categorical Data [*vhigh, high, med, low*] 2. **Maintenance_Price** - Categorical Data [*vhigh, high, med, low*] 3. **No_of_Doors** - Categorical Data [*2, 3, 4, 5more*] 4. **Person_Capacity** - Categorical Data [*2, 4, more*] 5. **Size_of_Luggage** - Categorical Data [*small, med, big*] 6. **Safety** - Categorical Data [*low, med, high*] 7. **Car_Acceptability** - Categorical Data [*unacc, acc, good, vgood*] Kaggle dataset identifier: car-acceptability-classification-dataset <jupyter_script># # Car Acceptability Multiclass Classification Using DNN (99.42% Accuracy) # #### Kevin Putra Santoso - Departemen Teknologi Informasi - Institut Teknologi Sepuluh Nopember import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/car-acceptability-classification-dataset/car.csv") df.shape df for i in df.columns: print(f"Fitur {i}") print(df[i].value_counts()) for i in df.columns: df = pd.concat([df, pd.get_dummies(df[i], prefix=f"{i}")], axis=1) df.drop(columns=i, inplace=True) df import seaborn as sns import matplotlib.pyplot as plt plt.figure(figsize=(20, 15)) sns.heatmap(df.corr(), annot=True) X = df[df.columns[:-4]] y = df[df.columns[-4:]] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.callbacks import ModelCheckpoint from tensorflow.keras.optimizers import Adam model = keras.Sequential( [ layers.Dense(64, activation="relu", input_shape=[X_train.shape[1]]), layers.Dropout(0.3), layers.Dense(128, activation="relu"), layers.Dropout(0.3), layers.Dense(128, activation="relu"), layers.Dropout(0.3), layers.Dense(128, activation="relu"), layers.Dropout(0.3), layers.Dense(128, activation="relu"), layers.Dropout(0.3), layers.Dense(4, activation="softmax"), ] ) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) cp = ModelCheckpoint("model/", save_best_only=True) history = model.fit( X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=64, callbacks=[cp], ) plt.plot(history.history["accuracy"]) plt.plot(history.history["val_accuracy"]) plt.title("Model accuracy") plt.ylabel("Accuracy") plt.xlabel("Epoch") plt.legend(["train", "test"], loc="upper left") plt.show() y_pred = model.predict(X_test) y_test y_pred = pd.DataFrame(y_pred, columns=list(y_test.columns)) y_test_list = 
[] y_pred_list = [] for i in range(len(y_pred)): y_test_list.append(y_test.columns[np.argmax(y_test.iloc[i])]) y_pred_list.append(y_pred.columns[np.argmax(y_pred.iloc[i])]) from sklearn.metrics import classification_report, confusion_matrix print(classification_report(y_test_list, y_pred_list, digits=4)) plt.figure(figsize=(10, 8)) sns.heatmap(confusion_matrix(y_test_list, y_pred_list), annot=True) plt.show()
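# A minimal, self-contained sketch of what the get_dummies loop above produces,
# using a tiny hypothetical frame (toy values, not the Kaggle file): every
# categorical column is replaced by one indicator column per category,
# prefixed with the original column name.
import pandas as pd

toy = pd.DataFrame(
    {"Safety": ["low", "med", "high"], "Car_Acceptability": ["unacc", "acc", "good"]}
)
encoded = toy.copy()
for col in toy.columns:
    encoded = pd.concat([encoded, pd.get_dummies(encoded[col], prefix=col)], axis=1)
    encoded.drop(columns=col, inplace=True)
print(encoded)
# Expected columns: Safety_high, Safety_low, Safety_med,
# Car_Acceptability_acc, Car_Acceptability_good, Car_Acceptability_unacc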
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/895/129895716.ipynb
car-acceptability-classification-dataset
subhajeetdas
[{"Id": 129895716, "ScriptId": 38631801, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8158363, "CreationDate": "05/17/2023 09:13:52", "VersionNumber": 1.0, "Title": "Car Acceptability Classification Using DNN (99.4%)", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 112.0, "LinesInsertedFromPrevious": 112.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 11}]
[{"Id": 186306594, "KernelVersionId": 129895716, "SourceDatasetVersionId": 5498151}]
[{"Id": 5498151, "DatasetId": 3172501, "DatasourceVersionId": 5572526, "CreatorUserId": 10127031, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "04/23/2023 15:31:34", "VersionNumber": 1.0, "Title": "Car Acceptability Classification Dataset", "Slug": "car-acceptability-classification-dataset", "Subtitle": "Classify a car's acceptability based on certain criteria.", "Description": "&gt;Car Acceptability Classification Database was derived from a simple hierarchical decision model originally developed for the demonstration of DEX, M. Bohanec, V. Rajkovic: Expert system for decision making. Sistemica 1(1), pp. 145-157, 1990.). The model evaluates cars according to the following concept structure:\n\n`Car Acceptability Classification Dataset`\n\n&gt;1. **Buying_Price** - Categorical Data [*vhigh, high, med, low*]\n2. **Maintenance_Price** - Categorical Data [*vhigh, high, med, low*]\n3. **No_of_Doors** - Categorical Data [*2, 3, 4, 5more*]\n4. **Person_Capacity** - Categorical Data [*2, 4, more*]\n5. **Size_of_Luggage** - Categorical Data [*small, med, big*]\n6. **Safety** - Categorical Data [*low, med, high*]\n7. **Car_Acceptability** - Categorical Data [*unacc, acc, good, vgood*]", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3172501, "CreatorUserId": 10127031, "OwnerUserId": 10127031.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5498151.0, "CurrentDatasourceVersionId": 5572526.0, "ForumId": 3236732, "Type": 2, "CreationDate": "04/23/2023 15:31:34", "LastActivityDate": "04/23/2023", "TotalViews": 10560, "TotalDownloads": 1398, "TotalVotes": 36, "TotalKernels": 12}]
[{"Id": 10127031, "UserName": "subhajeetdas", "DisplayName": "Subhajeet Das", "RegisterDate": "04/03/2022", "PerformanceTier": 2}]
# # Car Acceptability Multiclass Classification Using DNN (99.42% Accuracy) # #### Kevin Putra Santoso - Departemen Teknologi Informasi - Institut Teknologi Sepuluh Nopember import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/car-acceptability-classification-dataset/car.csv") df.shape df for i in df.columns: print(f"Fitur {i}") print(df[i].value_counts()) for i in df.columns: df = pd.concat([df, pd.get_dummies(df[i], prefix=f"{i}")], axis=1) df.drop(columns=i, inplace=True) df import seaborn as sns import matplotlib.pyplot as plt plt.figure(figsize=(20, 15)) sns.heatmap(df.corr(), annot=True) X = df[df.columns[:-4]] y = df[df.columns[-4:]] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.callbacks import ModelCheckpoint from tensorflow.keras.optimizers import Adam model = keras.Sequential( [ layers.Dense(64, activation="relu", input_shape=[X_train.shape[1]]), layers.Dropout(0.3), layers.Dense(128, activation="relu"), layers.Dropout(0.3), layers.Dense(128, activation="relu"), layers.Dropout(0.3), layers.Dense(128, activation="relu"), layers.Dropout(0.3), layers.Dense(128, activation="relu"), layers.Dropout(0.3), layers.Dense(4, activation="softmax"), ] ) model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) cp = ModelCheckpoint("model/", save_best_only=True) history = model.fit( X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=64, callbacks=[cp], ) plt.plot(history.history["accuracy"]) plt.plot(history.history["val_accuracy"]) plt.title("Model accuracy") plt.ylabel("Accuracy") plt.xlabel("Epoch") plt.legend(["train", "test"], loc="upper left") plt.show() y_pred = model.predict(X_test) y_test y_pred = pd.DataFrame(y_pred, columns=list(y_test.columns)) y_test_list = [] y_pred_list = [] for i in range(len(y_pred)): y_test_list.append(y_test.columns[np.argmax(y_test.iloc[i])]) y_pred_list.append(y_pred.columns[np.argmax(y_pred.iloc[i])]) from sklearn.metrics import classification_report, confusion_matrix print(classification_report(y_test_list, y_pred_list, digits=4)) plt.figure(figsize=(10, 8)) sns.heatmap(confusion_matrix(y_test_list, y_pred_list), annot=True) plt.show()
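# An illustrative alternative (not in the original notebook) to the per-row
# argmax loop above: pandas idxmax returns the column label of the row-wise
# maximum, assuming the one-hot `y_test` and probability `y_pred` DataFrames
# built above.
y_test_list = y_test.astype(int).idxmax(axis=1).tolist()  # cast in case get_dummies produced booleans
y_pred_list = y_pred.idxmax(axis=1).tolist()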
false
1
1,010
11
1,300
1,010
129895068
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import matplotlib.pyplot as plt from statsmodels.tsa.stattools import adfuller np.random.seed(42) n_steps = 500 random_walk = np.cumsum( np.random.randn(n_steps) ) # Cumulative sum of random normal variables initial_value = 10 # Choose an initial value as 10 random_walk += initial_value # random_walk plt.plot(random_walk) plt.xlabel("Time") plt.ylabel("Value") plt.title("Simulated Random Walk") plt.show() from statsmodels.tsa.stattools import adfuller ADF_result = adfuller(random_walk) print(f"ADF Statistic: {ADF_result[0]}") print(f"p-value: {ADF_result[1]}") # **Since p-value greater than 0.05, we cannot reject the null hypothesis stating that our time series is not stationary. ​** from statsmodels.graphics.tsaplots import plot_acf plot_acf(random_walk, lags=20) # differencing diff_random_walk = np.diff(random_walk, n=1) plt.plot(diff_random_walk) # **From the above Diagram we can clearly see it is stationary now** # ACF test after differencing from statsmodels.graphics.tsaplots import plot_acf plot_acf(diff_random_walk, lags=20) # **Since it has no autocorrelation after differencing, it is a random walk.** # 80% of train data split = int((80 / 100) * random_walk.shape[0]) train = random_walk[:split] test = random_walk[split:] # **Historical Mean** overall_mean = train.mean() overall_mean_train = pd.Series([overall_mean] * test.shape[0]) import math MSE = np.square(np.subtract(overall_mean_train, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Value** # train last_value = train[-1] last_test = pd.Series([last_value] * test.shape[0]) import math MSE = np.square(np.subtract(last_test, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Month** train[-30:].mean() last_month = train[-30:].mean() last_month = pd.Series([last_month] * test.shape[0]) import math MSE = np.square(np.subtract(last_month, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last month has lower Mean square error among all the naive forcast methods.** plt.plot(test, label="Actual") plt.plot(last_month, label="Naive Last Month Forecast") plt.plot(last_test, label="Naive Last Value Forecast") plt.plot(overall_mean_train, label="Naive Historical Mean Forecast") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show() # **SHIFT** test_shift = np.roll(test, 1) # taking last value test_shift[0] = train[-1] test_shift import math MSE = np.square(np.subtract(test_shift, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **SHIFT has lowers Mean square error among all the naive forcast methods. 
It decreases MSE compare to other naive forcasts** plt.plot(test, label="Actual") plt.plot(test_shift, label="Naive Shift Forecast ") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show()
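# A small helper (names here are illustrative, not from the notebook) that
# collects the four naive baselines above in one place instead of repeating
# the MSE/RMSE boilerplate; assumes `train` and `test` as defined above.
import numpy as np

def rmse(pred, actual):
    return float(np.sqrt(np.mean(np.square(np.subtract(pred, actual)))))

naive_forecasts = {
    "historical mean": np.full_like(test, train.mean()),
    "last value": np.full_like(test, train[-1]),
    "last 30 mean": np.full_like(test, train[-30:].mean()),
    "shift": np.concatenate(([train[-1]], test[:-1])),
}
for name, pred in naive_forecasts.items():
    print(f"{name:>15}: RMSE = {rmse(pred, test):.3f}")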
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/895/129895068.ipynb
null
null
[{"Id": 129895068, "ScriptId": 38627623, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13833479, "CreationDate": "05/17/2023 09:08:45", "VersionNumber": 1.0, "Title": "Exercise-1_random_walk", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 152.0, "LinesInsertedFromPrevious": 152.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import matplotlib.pyplot as plt from statsmodels.tsa.stattools import adfuller np.random.seed(42) n_steps = 500 random_walk = np.cumsum( np.random.randn(n_steps) ) # Cumulative sum of random normal variables initial_value = 10 # Choose an initial value as 10 random_walk += initial_value # random_walk plt.plot(random_walk) plt.xlabel("Time") plt.ylabel("Value") plt.title("Simulated Random Walk") plt.show() from statsmodels.tsa.stattools import adfuller ADF_result = adfuller(random_walk) print(f"ADF Statistic: {ADF_result[0]}") print(f"p-value: {ADF_result[1]}") # **Since p-value greater than 0.05, we cannot reject the null hypothesis stating that our time series is not stationary. ​** from statsmodels.graphics.tsaplots import plot_acf plot_acf(random_walk, lags=20) # differencing diff_random_walk = np.diff(random_walk, n=1) plt.plot(diff_random_walk) # **From the above Diagram we can clearly see it is stationary now** # ACF test after differencing from statsmodels.graphics.tsaplots import plot_acf plot_acf(diff_random_walk, lags=20) # **Since it has no autocorrelation after differencing, it is a random walk.** # 80% of train data split = int((80 / 100) * random_walk.shape[0]) train = random_walk[:split] test = random_walk[split:] # **Historical Mean** overall_mean = train.mean() overall_mean_train = pd.Series([overall_mean] * test.shape[0]) import math MSE = np.square(np.subtract(overall_mean_train, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Value** # train last_value = train[-1] last_test = pd.Series([last_value] * test.shape[0]) import math MSE = np.square(np.subtract(last_test, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Month** train[-30:].mean() last_month = train[-30:].mean() last_month = pd.Series([last_month] * test.shape[0]) import math MSE = np.square(np.subtract(last_month, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last month has lower Mean square error among all the naive forcast methods.** plt.plot(test, label="Actual") plt.plot(last_month, label="Naive Last Month Forecast") plt.plot(last_test, label="Naive Last Value Forecast") plt.plot(overall_mean_train, label="Naive Historical Mean Forecast") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show() # **SHIFT** test_shift = np.roll(test, 1) # taking last value test_shift[0] = train[-1] test_shift import math MSE = np.square(np.subtract(test_shift, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **SHIFT has lowers Mean square error among all the naive forcast methods. 
It decreases MSE compare to other naive forcasts** plt.plot(test, label="Actual") plt.plot(test_shift, label="Naive Shift Forecast ") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show()
false
0
1,235
0
1,235
1,235
129895669
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # EXERCISE ON L & T STOCK PRICES import numpy as np import matplotlib.pyplot as plt from statsmodels.tsa.stattools import adfuller import pandas as pd df = pd.read_csv("/kaggle/input/l-and-t-data/LT.NS.csv") #:May 17, 2022 - May 17, 2023 df # random_walk plt.plot(df["Close"]) plt.xlabel("Time") plt.ylabel("Value") plt.title("Simulated Random Walk") plt.show() from statsmodels.tsa.stattools import adfuller ADF_result = adfuller(df["Close"]) print(f"ADF Statistic: {ADF_result[0]}") print(f"p-value: {ADF_result[1]}") # **Since p-value greater than 0.05, we cannot reject the null hypothesis stating that our time series is not stationary. ​** from statsmodels.graphics.tsaplots import plot_acf plot_acf(df["Close"], lags=20) # differencing diff_random_walk = np.diff(df["Close"], n=1) plt.plot(diff_random_walk) # **From the above Diagram we can clearly see it is stationary now** # ACF test after differencing from statsmodels.graphics.tsaplots import plot_acf plot_acf(diff_random_walk, lags=20) # **Since it has no autocorrelation after differencing, it is a random walk.** train = df.iloc[:-5, -3].to_numpy() test = df.iloc[-5:, -3].to_numpy() train test # **Historical Mean** overall_mean = train.mean() overall_mean_train = pd.Series([overall_mean] * test.shape[0]) import math MSE = np.square(np.subtract(overall_mean_train, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Value** # train last_value = train[-1] last_test = pd.Series([last_value] * test.shape[0]) import math MSE = np.square(np.subtract(last_test, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Month** train[-30:].mean() last_month = train[-30:].mean() last_month = pd.Series([last_month] * test.shape[0]) import math MSE = np.square(np.subtract(last_month, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last month has lower Mean square error among all the naive forcast methods.** plt.plot(test, label="Actual") plt.plot(last_month, label="Naive Last Month Forecast") plt.plot(last_test, label="Naive Last Value Forecast") plt.plot(overall_mean_train, label="Naive Historical Mean Forecast") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show() # **SHIFT** test_shift = np.roll(test, 1) # taking last value test_shift[0] = train[-1] test_shift import math MSE = np.square(np.subtract(test_shift, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) plt.plot(test, label="Actual") plt.plot(test_shift, label="Naive Shift Forecast ") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show() # **SHIFT has lowers Mean square error among all the naive forcast methods. 
It decreases MSE compared to the other naive forecasts** # # EXERCISE 2 FOR GOOGLE DATA df2 = pd.read_csv("/kaggle/input/google-1/GOOGL.csv") df2 plt.plot(df2["Close"]) plt.xlabel("Time") plt.ylabel("Value") plt.title("Simulated Random Walk") plt.show() # **From the above diagram, it seems to be non-stationary** from statsmodels.tsa.stattools import adfuller ADF_result = adfuller(df2["Close"]) print(f"ADF Statistic: {ADF_result[0]}") print(f"p-value: {ADF_result[1]}") # **Since p-value greater than 0.05, we cannot reject the null hypothesis stating that our time series is not stationary.** from statsmodels.graphics.tsaplots import plot_acf plot_acf(df2["Close"], lags=20) # differencing diff_random_walk_google = np.diff(df2["Close"], n=1) plt.plot(diff_random_walk_google) # **From the above Diagram we can clearly see it is stationary now** # ACF test after differencing from statsmodels.graphics.tsaplots import plot_acf plot_acf(diff_random_walk_google, lags=20) # **Since it has no autocorrelation after differencing, it is a random walk.** train_google = df2.iloc[:-5, -3].to_numpy() test_google = df2.iloc[-5:, -3].to_numpy() # **Historical Mean** overall_mean_google = train_google.mean() overall_mean_train_google = pd.Series([overall_mean_google] * test_google.shape[0]) import math MSE = np.square(np.subtract(overall_mean_train_google, test_google)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Value** last_value = train_google[-1] last_test_google = pd.Series([last_value] * test_google.shape[0]) import math MSE = np.square(np.subtract(last_test_google, test_google)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Month** last_month_google = train_google[-30:].mean() last_month_google = pd.Series([last_month_google] * test_google.shape[0]) import math MSE = np.square(np.subtract(last_month_google, test_google)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last value has the lowest Mean square error among all the naive forecast methods.** plt.plot(test_google, label="Actual") plt.plot(last_month_google, label="Naive Last Month Forecast") plt.plot(last_test_google, label="Naive Last Value Forecast") plt.plot(overall_mean_train_google, label="Naive Historical Mean Forecast") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show() # **SHIFT** test_shift_google = np.roll(test_google, 1) test_shift_google[0] = train_google[-1] import math MSE = np.square(np.subtract(test_shift_google, test_google)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last value has a lower Mean square error than the SHIFT forecast. SHIFT does not decrease MSE compared to the last value naive forecast** # plt.plot(test_google, label="Actual") plt.plot(test_shift_google, label="Naive Shift Forecast ") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show()
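# A hedged note on column selection (this assumes a Yahoo-Finance-style layout
# of Date, Open, High, Low, Close, Adj Close, Volume, which is not stated in
# the notebook): `iloc[:, -3]` picks "Close" only by position, so selecting
# the column by name is more robust if the file layout ever changes.
train = df["Close"].iloc[:-5].to_numpy()
test = df["Close"].iloc[-5:].to_numpy()
train_google = df2["Close"].iloc[:-5].to_numpy()
test_google = df2["Close"].iloc[-5:].to_numpy()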
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/895/129895669.ipynb
null
null
[{"Id": 129895669, "ScriptId": 38630750, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13833479, "CreationDate": "05/17/2023 09:13:25", "VersionNumber": 1.0, "Title": "L & T_and_Google_Random_Walk", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 270.0, "LinesInsertedFromPrevious": 270.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # EXERCISE ON L & T STOCK PRICES import numpy as np import matplotlib.pyplot as plt from statsmodels.tsa.stattools import adfuller import pandas as pd df = pd.read_csv("/kaggle/input/l-and-t-data/LT.NS.csv") #:May 17, 2022 - May 17, 2023 df # random_walk plt.plot(df["Close"]) plt.xlabel("Time") plt.ylabel("Value") plt.title("Simulated Random Walk") plt.show() from statsmodels.tsa.stattools import adfuller ADF_result = adfuller(df["Close"]) print(f"ADF Statistic: {ADF_result[0]}") print(f"p-value: {ADF_result[1]}") # **Since p-value greater than 0.05, we cannot reject the null hypothesis stating that our time series is not stationary. ​** from statsmodels.graphics.tsaplots import plot_acf plot_acf(df["Close"], lags=20) # differencing diff_random_walk = np.diff(df["Close"], n=1) plt.plot(diff_random_walk) # **From the above Diagram we can clearly see it is stationary now** # ACF test after differencing from statsmodels.graphics.tsaplots import plot_acf plot_acf(diff_random_walk, lags=20) # **Since it has no autocorrelation after differencing, it is a random walk.** train = df.iloc[:-5, -3].to_numpy() test = df.iloc[-5:, -3].to_numpy() train test # **Historical Mean** overall_mean = train.mean() overall_mean_train = pd.Series([overall_mean] * test.shape[0]) import math MSE = np.square(np.subtract(overall_mean_train, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Value** # train last_value = train[-1] last_test = pd.Series([last_value] * test.shape[0]) import math MSE = np.square(np.subtract(last_test, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Month** train[-30:].mean() last_month = train[-30:].mean() last_month = pd.Series([last_month] * test.shape[0]) import math MSE = np.square(np.subtract(last_month, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last month has lower Mean square error among all the naive forcast methods.** plt.plot(test, label="Actual") plt.plot(last_month, label="Naive Last Month Forecast") plt.plot(last_test, label="Naive Last Value Forecast") plt.plot(overall_mean_train, label="Naive Historical Mean Forecast") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show() # **SHIFT** test_shift = np.roll(test, 1) # taking last value test_shift[0] = train[-1] test_shift import math MSE = np.square(np.subtract(test_shift, test)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) plt.plot(test, label="Actual") plt.plot(test_shift, label="Naive Shift Forecast ") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show() # **SHIFT has lowers Mean square error among all the naive forcast methods. 
It decreases MSE compare to other naive forcasts** # # EXERCISE 2 FOR GOOGLE DATA df2 = pd.read_csv("/kaggle/input/google-1/GOOGL.csv") df2 plt.plot(df2["Close"]) plt.xlabel("Time") plt.ylabel("Value") plt.title("Simulated Random Walk") plt.show() # **From the above diagram, it seems to be non stationary** from statsmodels.tsa.stattools import adfuller ADF_result = adfuller(df2["Close"]) print(f"ADF Statistic: {ADF_result[0]}") print(f"p-value: {ADF_result[1]}") # **Since p-value greater than 0.05, we cannot reject the null hypothesis stating that our time series is not stationary.** from statsmodels.graphics.tsaplots import plot_acf plot_acf(df2["Close"], lags=20) # differencing diff_random_walk_google = np.diff(df2["Close"], n=1) plt.plot(diff_random_walk_google) # **From the above Diagram we can clearly see it is stationary now** # ACF test after differencing from statsmodels.graphics.tsaplots import plot_acf plot_acf(diff_random_walk_google, lags=20) # **Since it has no autocorrelation after differencing, it is a random walk.** train_google = df2.iloc[:-5, -3].to_numpy() test_google = df2.iloc[-5:, -3].to_numpy() # **Historical Mean** overall_mean_google = train_google.mean() overall_mean_train_google = pd.Series([overall_mean] * test_google.shape[0]) import math MSE = np.square(np.subtract(overall_mean_train_google, test_google)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Value** last_value = train_google[-1] last_test_google = pd.Series([last_value] * test_google.shape[0]) import math MSE = np.square(np.subtract(last_test_google, test_google)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last Month** last_month_google = train_google[-30:].mean() last_month_google = pd.Series([last_month_google] * test_google.shape[0]) import math MSE = np.square(np.subtract(last_month_google, test_google)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last value has lower Mean square error among all the naive forcast methods.** plt.plot(test_google, label="Actual") plt.plot(last_month_google, label="Naive Last Month Forecast") plt.plot(last_test_google, label="Naive Last Value Forecast") plt.plot(overall_mean_train_google, label="Naive Historical Mean Forecast") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show() # **SHIFT** test_shift_google = np.roll(test_google, 1) test_shift_google[0] = train_google[-1] import math MSE = np.square(np.subtract(test_shift_google, test_google)).mean() RMSE = math.sqrt(MSE) print(MSE) print("Root Mean Square Error:\n") print(RMSE) # **Last value has lower Mean square error than SHIFT forcast. SHIFT does not decreases MSE compare to last value naive forcast** # plt.plot(test_google, label="Actual") plt.plot(test_shift_google, label="Naive Shift Forecast ") plt.xlabel("Time") plt.ylabel("Value") plt.title("Naive Forecasts") plt.legend() plt.show()
false
0
2,256
0
2,256
2,256
129901474
<jupyter_start><jupyter_text>GTZAN Dataset - Music Genre Classification ### Context Music. Experts have been trying for a long time to understand sound and what differenciates one song from another. How to visualize sound. What makes a tone different from another. This data hopefully can give the opportunity to do just that. ### Content * **genres original** - A collection of 10 genres with 100 audio files each, all having a length of 30 seconds (the famous GTZAN dataset, the MNIST of sounds) * **images original** - A visual representation for each audio file. One way to classify data is through neural networks. Because NNs (like CNN, what we will be using today) usually take in some sort of image representation, the audio files were converted to Mel Spectrograms to make this possible. * **2 CSV files** - Containing features of the audio files. One file has for each song (30 seconds long) a mean and variance computed over multiple features that can be extracted from an audio file. The other file has the same structure, but the songs were split before into 3 seconds audio files (this way increasing 10 times the amount of data we fuel into our classification models). *With data, more is always better*. Kaggle dataset identifier: gtzan-dataset-music-genre-classification <jupyter_script># Usual Libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import sklearn # Librosa to handle audio files import librosa import librosa.display import IPython.display as ipd import warnings warnings.filterwarnings("ignore") # the path of file and generals import os general_path = "../input/gtzan-dataset-music-genre-classification/Data" genres = list(os.listdir(f"{general_path}/genres_original/")) genres # # Explore Audio Data # We will use `librosa`, which is the mother of audio files. # ## Understanding Audio # Let's first Explore our Audio Data to see how it looks (we'll work with `reggae.00036.wav` file). # * **Sound**: sequence of vibrations in varying pressure strengths (`y`) # * The **sample rate** (`sr`) is the number of samples of audio carried per second, measured in Hz or kHz # Importing 1 file sound_sequence, sr = librosa.load( f"{general_path}/genres_original/reggae/reggae.00036.wav" ) print("sound_sequence:", sound_sequence, "\n") print("sound_sequence shape:", np.shape(sound_sequence), "\n") print("Sample Rate (KHz):", sr, "\n") # Trim leading and trailing silence from an audio signal (silence before and after the actual audio) audio_file, _ = librosa.effects.trim(sound_sequence) # the result is a numpy ndarray print("Audio File:", audio_file, "\n") print("Audio File shape:", np.shape(audio_file)) # ### 2D Representation: Sound Waves plt.figure(figsize=(16, 6)) librosa.display.waveshow(y=audio_file, sr=sr, color="#A300F9") plt.title("Sound Waves in Reggae 36", fontsize=23) # ## EDA # EDA is going to be performed on the `features_30_sec.csv`. This file contains the mean and variance for each audio file of the features analysed above. # So, the table has a total of 1000 rows (10 genres x 100 audio files) and 60 features (dimensionalities). # # Machine Learning Classification # Using the `features_3_sec.csv` file, we can try to build a classifier that accurately predicts for any new audio file input its genre. 
# ### Libraries from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, roc_curve from sklearn import preprocessing from sklearn.model_selection import train_test_split from sklearn.feature_selection import RFE from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import cross_val_score, KFold from keras.utils import to_categorical from keras.models import Sequential from keras.layers import Dense from keras.optimizers import Adam from keras.layers import Dropout from keras import regularizers # ### Reading in the Data # Now let's try to predict the Genre of the audio using Machine Learning techniques. data = pd.read_csv(f"{general_path}/features_3_sec.csv") data # ### Features and Target variable # * creates the target and feature variables # * normalizes the data y = data["label"] # genre variable. X = data.drop(["label", "filename"], axis=1) # select all columns but not the labels X = np.asarray(X).astype(np.float32) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) scaler = MinMaxScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # reuse the scaler fitted on the training split # # NN Model (baseline model) epochs_num = 20 batch_size = 5 baseline_model = Sequential() baseline_model.add(Dense(120, input_dim=np.shape(X)[1], activation="relu")) baseline_model.add(Dropout(0.25)) baseline_model.add( Dense( 64, kernel_initializer="normal", kernel_regularizer=regularizers.l2(0.001), activation="relu", ) ) baseline_model.add(Dropout(0.25)) baseline_model.add(Dense(len(genres), activation="softmax")) baseline_model.summary() # Compile the model: categorical cross-entropy for the one-hot multi-class labels baseline_model.compile( optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] ) y_test = [genres.index(y) for y in y_test] y_test = np.array(y_test) y_train = [genres.index(y) for y in y_train] y_train = np.array(y_train) y_train = to_categorical(y_train) y_test = to_categorical(y_test) history = baseline_model.fit(X_train, y_train, epochs=epochs_num, batch_size=batch_size) y_pred = baseline_model.predict(X_test) y_pred = np.argmax(y_pred, axis=1) y_test = np.argmax(y_test, axis=1) print("NN accuracy : ", (round(accuracy_score(y_test, y_pred), 3)) * 100, "%") nn_acc = round(accuracy_score(y_test, y_pred), 3) * 100 plt.figure(figsize=(12, 6)) plt.plot(history.history["accuracy"], label="Accuracy") plt.plot(history.history["loss"], label="Loss") plt.title("Training History") plt.xlabel("Epoch") plt.legend() plt.show() # # KNN model - Cross Validation knn = KNeighborsClassifier(n_neighbors=len(genres)) num_folds = 5 # number of folds scaler = MinMaxScaler() X = scaler.fit_transform(X) fold_no = 1 y = np.array(y) kfold = KFold(n_splits=num_folds, shuffle=True) acc_array = [] for train, test in kfold.split(X, y): print(f"Fold No.{fold_no}") knn.fit(X[train], y[train]) preds = knn.predict(X[test]) acc = (round(accuracy_score(y[test], preds), 3)) * 100 acc_array.append(acc) print("KNN Accuracy:", acc, "%") print("---------------------------") fold_no = fold_no + 1 knn_acc = np.mean(acc_array) print("Mean KNN Accuracy:", knn_acc, "%") # # Random Forest model - Cross Validation import numpy as np from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=100) num_folds = 5 # number of folds fold_no = 1 kfold = KFold(n_splits=num_folds, shuffle=True) acc_array = [] for train, test in kfold.split(X, y): print(f"Fold No.{fold_no}") rf.fit(X[train], y[train]) preds = rf.predict(X[test]) acc = (round(accuracy_score(y[test], preds), 3)) * 100 
acc_array.append(acc) print("Random Forest Accuracy:", acc, "%") print("---------------------------") fold_no = fold_no + 1 rf_acc = np.mean(acc_array) print("Mean Random Forest Accuracy:", rf_acc, "%") # # Testing row_index = int( input( "Enter the index of the row you want to predict (0 to {}): ".format( len(data) - 1 ) ) ) selected_row = X[row_index] cols = [col if col != "label" or col != "filename" else "" for col in data.columns] cols.remove("label") cols.remove("filename") for i in range(len(cols)): selected_row[i] = (selected_row[i] - min(X[i])) / (max(X[i]) - min(X[i])) selected_row = np.reshape(selected_row, (1, -1)) selected_filename = data.loc[row_index, "filename"] filename_parts = selected_filename.split(".") real_genre = filename_parts[0] print("\nThe true genre is :", real_genre, "\n") preds = [] nn_prediction = baseline_model.predict(selected_row) nn_prediction = np.argmax(nn_prediction, axis=1) nn_genre = genres[nn_prediction[0]] preds.append(nn_genre) knn_prediction = knn.predict(selected_row) knn_genre = knn_prediction[0] preds.append(knn_genre) rf_prediction = rf.predict(selected_row) rf_genre = rf_prediction[0] preds.append(rf_genre) print("\nNeural Network Prediction:", nn_genre) print("K-Nearest Neighbors Prediction:", knn_genre) print("Random Forest Prediction:", rf_genre, "\n") preds = [True if pred == real_genre else False for pred in preds] print(preds.count(True), " out of 3 models got it right !") # # plot the accurcy between models import matplotlib.pyplot as plt models = ["Neural Network", "K-Nearest Neighbors", "Random Forest"] accuracy = [nn_acc, knn_acc, rf_acc] plt.bar(models, accuracy) plt.title("Model Comparison") plt.xlabel("Model") plt.ylabel("Accuracy") plt.show() # # Test on many random rows to plot import random num_rows = int(input("Enter the number of rows you want to predict: ")) predicted_genres = [] real_genres = [] for _ in range(num_rows): row_index = random.randint(0, len(data) - 1) selected_row = X[row_index] cols = [col if col != "label" or col != "filename" else "" for col in data.columns] cols.remove("label") cols.remove("filename") for i in range(len(cols)): selected_row[i] = (selected_row[i] - min(X[i])) / (max(X[i]) - min(X[i])) selected_row = np.reshape(selected_row, (1, -1)) selected_filename = data.loc[row_index, "filename"] filename_parts = selected_filename.split(".") real_genre = filename_parts[0] nn_prediction = baseline_model.predict(selected_row) nn_prediction = np.argmax(nn_prediction, axis=1) nn_genre = genres[nn_prediction[0]] knn_prediction = knn.predict(selected_row) knn_genre = knn_prediction[0] rf_prediction = rf.predict(selected_row) rf_genre = rf_prediction[0] predicted_genres.append([nn_genre, knn_genre, rf_genre]) real_genres.append(real_genre) # # plot predictions data predictions_df = pd.DataFrame( predicted_genres, columns=["NN Genre", "KNN Genre", "RF Genre"] ) predictions_df["Real Genre"] = real_genres genre_counts = predictions_df["Real Genre"].value_counts() genre_order = genre_counts.index plt.figure(figsize=(12, 6)) sns.countplot(data=predictions_df, x="Real Genre", hue="NN Genre", order=genre_order) plt.xlabel("Real Genre") plt.ylabel("Count") plt.title("Comparison of Predicted and Real Genres - NN") plt.legend(title="Predicted Genre") plt.show() plt.figure(figsize=(12, 6)) sns.countplot( data=predictions_df, x="Real Genre", hue="KNN Genre", order=genre_order, palette="Set2", ) plt.xlabel("Real Genre") plt.ylabel("Count") plt.title("Comparison of Predicted and Real Genres - KNN") 
plt.legend(title="Predicted Genre - KNN") # Create a third subplot for RF predictions plt.figure(figsize=(12, 6)) sns.countplot( data=predictions_df, x="Real Genre", hue="RF Genre", order=genre_order, palette="Set3", ) plt.xlabel("Real Genre") plt.ylabel("Count") plt.title("Comparison of Predicted and Real Genres - RF") plt.legend(title="Predicted Genre - RF") plt.show() # Confusion Matrix for Neural Network (NN) nn_cm = confusion_matrix(real_genres, [genre[0] for genre in predicted_genres]) plt.figure(figsize=(8, 6)) sns.heatmap(nn_cm, annot=True, fmt="d", cmap="Blues") plt.title("Confusion Matrix - Neural Network") plt.xlabel("Predicted Labels") plt.ylabel("True Labels") plt.xticks(ticks=np.arange(len(genres)), labels=genres, rotation=45) plt.yticks(ticks=np.arange(len(genres)), labels=genres, rotation=0) plt.show() # Confusion Matrix for K-Nearest Neighbors (KNN) knn_cm = confusion_matrix(real_genres, [genre[1] for genre in predicted_genres]) plt.figure(figsize=(8, 6)) sns.heatmap(knn_cm, annot=True, fmt="d", cmap="Blues") plt.title("Confusion Matrix - K-Nearest Neighbors") plt.xlabel("Predicted Labels") plt.ylabel("True Labels") plt.xticks(ticks=np.arange(len(genres)), labels=genres, rotation=45) plt.yticks(ticks=np.arange(len(genres)), labels=genres, rotation=0) plt.show() # Confusion Matrix for Random Forest (RF) rf_cm = confusion_matrix(real_genres, [genre[2] for genre in predicted_genres]) plt.figure(figsize=(8, 6)) sns.heatmap(rf_cm, annot=True, fmt="d", cmap="Blues") plt.title("Confusion Matrix - Random Forest") plt.xlabel("Predicted Labels") plt.ylabel("True Labels") plt.xticks(ticks=np.arange(len(genres)), labels=genres, rotation=45) plt.yticks(ticks=np.arange(len(genres)), labels=genres, rotation=0) plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/901/129901474.ipynb
gtzan-dataset-music-genre-classification
andradaolteanu
[{"Id": 129901474, "ScriptId": 38638190, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12048261, "CreationDate": "05/17/2023 10:01:53", "VersionNumber": 1.0, "Title": "notebook3c987ef731", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 363.0, "LinesInsertedFromPrevious": 363.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186314636, "KernelVersionId": 129901474, "SourceDatasetVersionId": 1032238}]
[{"Id": 1032238, "DatasetId": 568973, "DatasourceVersionId": 1061257, "CreatorUserId": 3564129, "LicenseName": "Other (specified in description)", "CreationDate": "03/24/2020 14:05:33", "VersionNumber": 1.0, "Title": "GTZAN Dataset - Music Genre Classification", "Slug": "gtzan-dataset-music-genre-classification", "Subtitle": "Audio Files | Mel Spectrograms | CSV with extracted features", "Description": "### Context\n\nMusic. Experts have been trying for a long time to understand sound and what differenciates one song from another. How to visualize sound. What makes a tone different from another.\n\nThis data hopefully can give the opportunity to do just that.\n\n\n### Content\n\n* **genres original** - A collection of 10 genres with 100 audio files each, all having a length of 30 seconds (the famous GTZAN dataset, the MNIST of sounds)\n* **images original** - A visual representation for each audio file. One way to classify data is through neural networks. Because NNs (like CNN, what we will be using today) usually take in some sort of image representation, the audio files were converted to Mel Spectrograms to make this possible.\n* **2 CSV files** - Containing features of the audio files. One file has for each song (30 seconds long) a mean and variance computed over multiple features that can be extracted from an audio file. The other file has the same structure, but the songs were split before into 3 seconds audio files (this way increasing 10 times the amount of data we fuel into our classification models). *With data, more is always better*.\n\n\n### Acknowledgements\n\n* The GTZAN dataset is the most-used public dataset for evaluation in machine listening research for music genre recognition (MGR). The files were collected in 2000-2001 from a variety of sources including personal CDs, radio, microphone recordings, in order to represent a variety of recording conditions (http://marsyas.info/downloads/datasets.html).\n* This was a team project for uni, so the effort in creating the images and features wasn't only my own. So, I want to thank **James Wiltshire, Lauren O'Hare and Minyu Lei** for being the best teammates ever and for having so much fun and learning so much during the 3 days we worked on this.\n\n\n### Inspiration\n\n* what is an audio file?\n* how does an audio file look?\n* can you extract features?\n* can you perform EDA?\n* can you create a super powerful NN on the images?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 568973, "CreatorUserId": 3564129, "OwnerUserId": 3564129.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1032238.0, "CurrentDatasourceVersionId": 1061257.0, "ForumId": 582697, "Type": 2, "CreationDate": "03/24/2020 14:05:33", "LastActivityDate": "03/24/2020", "TotalViews": 304300, "TotalDownloads": 45527, "TotalVotes": 639, "TotalKernels": 154}]
[{"Id": 3564129, "UserName": "andradaolteanu", "DisplayName": "Andrada", "RegisterDate": "08/09/2019", "PerformanceTier": 4}]
# Usual Libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import sklearn # Librosa to handle audio files import librosa import librosa.display import IPython.display as ipd import warnings warnings.filterwarnings("ignore") # the path of file and generals import os general_path = "../input/gtzan-dataset-music-genre-classification/Data" genres = list(os.listdir(f"{general_path}/genres_original/")) genres # # Explore Audio Data # We will use `librosa`, which is the mother of audio files. # ## Understanding Audio # Let's first Explore our Audio Data to see how it looks (we'll work with `reggae.00036.wav` file). # * **Sound**: sequence of vibrations in varying pressure strengths (`y`) # * The **sample rate** (`sr`) is the number of samples of audio carried per second, measured in Hz or kHz # Importing 1 file sound_sequence, sr = librosa.load( f"{general_path}/genres_original/reggae/reggae.00036.wav" ) print("sound_sequence:", sound_sequence, "\n") print("sound_sequence shape:", np.shape(sound_sequence), "\n") print("Sample Rate (KHz):", sr, "\n") # Trim leading and trailing silence from an audio signal (silence before and after the actual audio) audio_file, _ = librosa.effects.trim(sound_sequence) # the result is an numpy ndarray print("Audio File:", audio_file, "\n") print("Audio File shape:", np.shape(audio_file)) # ### 2D Representation: Sound Waves plt.figure(figsize=(16, 6)) librosa.display.waveshow(y=audio_file, sr=sr, color="#A300F9") plt.title("Sound Waves in Reggae 36", fontsize=23) # ## EDA # EDA is going to be performed on the `features_30_sec.csv`. This file contains the mean and variance for each audio file fo the features analysed above. # So, the table has a final of 1000 rows (10 genrex x 100 audio files) and 60 features (dimensionalities). # # Machine Learning Classification # Using the `features_3_sec.csv` file, we can try to build a classifier that accurately predicts for any new audio file input it's genre. # ### Libraries from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, roc_curve from sklearn import preprocessing from sklearn.model_selection import train_test_split from sklearn.feature_selection import RFE from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import cross_val_score, KFold from keras.utils import to_categorical from keras.models import Sequential from keras.layers import Dense from keras.optimizers import Adam from keras.layers import Dropout from keras import regularizers # ### Reading in the Data # Now let's try to predict the Genre of the audio using Machine Learning techniques. data = pd.read_csv(f"{general_path}/features_3_sec.csv") data # ### Features and Target variable # * creates the target and feature variables # * normalizes the data y = data["label"] # genre variable. 
X = data.drop(["label", "filename"], axis=1) # select all columns but not the labels X = np.asarray(X).astype(np.float32) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) scaler = MinMaxScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.fit_transform(X_test) # # NN Model (baseline model) epochs_num = 20 batch_size = 5 baseline_model = Sequential() baseline_model.add(Dense(120, input_dim=np.shape(X)[1], activation="relu")) baseline_model.add(Dropout(0.25)) baseline_model.add( Dense( 64, kernel_initializer="normal", kernel_regularizer=regularizers.l2(0.001), activation="relu", ) ) baseline_model.add(Dropout(0.25)) baseline_model.add(Dense(len(genres), activation="softmax")) baseline_model.summary() # Compile the model baseline_model.compile( optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"] ) y_test = [genres.index(y) for y in y_test] y_test = np.array(y_test) y_train = [genres.index(y) for y in y_train] y_train = np.array(y_train) y_train = to_categorical(y_train) y_test = to_categorical(y_test) history = baseline_model.fit(X_train, y_train, epochs=epochs_num, batch_size=batch_size) y_pred = baseline_model.predict(X_test) y_pred = np.argmax(y_pred, axis=1) y_test = np.argmax(y_test, axis=1) print("NN accuracy : ", (round(accuracy_score(y_test, y_pred), 3)) * 100, "%") nn_acc = round(accuracy_score(y_test, y_pred), 3) * 100 plt.figure(figsize=(12, 6)) plt.plot(history.history["accuracy"], label="Accuracy") plt.plot(history.history["loss"], label="Loss") plt.title("Training History") plt.xlabel("Epoch") plt.legend() plt.show() # # KNN model - Cross Validation knn = KNeighborsClassifier(n_neighbors=len(genres)) num_folds = 5 # number of folds scaler = MinMaxScaler() X = scaler.fit_transform(X) fold_no = 1 y = np.array(y) kfold = KFold(n_splits=num_folds, shuffle=True) acc_array = [] for train, test in kfold.split(X, y): print(f"Fold No.{fold_no}") knn.fit(X[train], y[train]) preds = knn.predict(X[test]) acc = (round(accuracy_score(y[test], preds), 3)) * 100 acc_array.append(acc) print("KNN Accuracy:", acc, "%") print("---------------------------") fold_no = fold_no + 1 knn_acc = np.mean(acc_array) print("Mean KNN Accuracy:", knn_acc, "%") # # Random Forest model - Cross Validation import numpy as np rf = RandomForestClassifier(n_estimators=100) num_folds = 5 # number of folds fold_no = 1 kfold = KFold(n_splits=num_folds, shuffle=True) acc_array = [] for train, test in kfold.split(X, y): print(f"Fold No.{fold_no}") rf.fit(X[train], y[train]) preds = rf.predict(X[test]) acc = (round(accuracy_score(y[test], preds), 3)) * 100 acc_array.append(acc) print("Random Forest Accuracy:", acc, "%") print("---------------------------") fold_no = fold_no + 1 rf_acc = np.mean(acc_array) print("Mean Random Forest Accuracy:", rf_acc, "%") # # Testing row_index = int( input( "Enter the index of the row you want to predict (0 to {}): ".format( len(data) - 1 ) ) ) selected_row = X[row_index] cols = [col if col != "label" or col != "filename" else "" for col in data.columns] cols.remove("label") cols.remove("filename") for i in range(len(cols)): selected_row[i] = (selected_row[i] - min(X[i])) / (max(X[i]) - min(X[i])) selected_row = np.reshape(selected_row, (1, -1)) selected_filename = data.loc[row_index, "filename"] filename_parts = selected_filename.split(".") real_genre = filename_parts[0] print("\nThe true genre is :", real_genre, "\n") preds = [] nn_prediction = baseline_model.predict(selected_row) nn_prediction = 
np.argmax(nn_prediction, axis=1) nn_genre = genres[nn_prediction[0]] preds.append(nn_genre) knn_prediction = knn.predict(selected_row) knn_genre = knn_prediction[0] preds.append(knn_genre) rf_prediction = rf.predict(selected_row) rf_genre = rf_prediction[0] preds.append(rf_genre) print("\nNeural Network Prediction:", nn_genre) print("K-Nearest Neighbors Prediction:", knn_genre) print("Random Forest Prediction:", rf_genre, "\n") preds = [True if pred == real_genre else False for pred in preds] print(preds.count(True), " out of 3 models got it right!") # # plot the accuracy between models import matplotlib.pyplot as plt models = ["Neural Network", "K-Nearest Neighbors", "Random Forest"] accuracy = [nn_acc, knn_acc, rf_acc] plt.bar(models, accuracy) plt.title("Model Comparison") plt.xlabel("Model") plt.ylabel("Accuracy") plt.show() # # Test on many random rows to plot import random num_rows = int(input("Enter the number of rows you want to predict: ")) predicted_genres = [] real_genres = [] for _ in range(num_rows): row_index = random.randint(0, len(data) - 1) # X is already scaled, so the row can be used as-is selected_row = np.reshape(X[row_index], (1, -1)) selected_filename = data.loc[row_index, "filename"] filename_parts = selected_filename.split(".") real_genre = filename_parts[0] nn_prediction = baseline_model.predict(selected_row) nn_prediction = np.argmax(nn_prediction, axis=1) nn_genre = genres[nn_prediction[0]] knn_prediction = knn.predict(selected_row) knn_genre = knn_prediction[0] rf_prediction = rf.predict(selected_row) rf_genre = rf_prediction[0] predicted_genres.append([nn_genre, knn_genre, rf_genre]) real_genres.append(real_genre) # # plot predictions data predictions_df = pd.DataFrame( predicted_genres, columns=["NN Genre", "KNN Genre", "RF Genre"] ) predictions_df["Real Genre"] = real_genres genre_counts = predictions_df["Real Genre"].value_counts() genre_order = genre_counts.index plt.figure(figsize=(12, 6)) sns.countplot(data=predictions_df, x="Real Genre", hue="NN Genre", order=genre_order) plt.xlabel("Real Genre") plt.ylabel("Count") plt.title("Comparison of Predicted and Real Genres - NN") plt.legend(title="Predicted Genre") plt.show() plt.figure(figsize=(12, 6)) sns.countplot( data=predictions_df, x="Real Genre", hue="KNN Genre", order=genre_order, palette="Set2", ) plt.xlabel("Real Genre") plt.ylabel("Count") plt.title("Comparison of Predicted and Real Genres - KNN") plt.legend(title="Predicted Genre - KNN") # Create a third subplot for RF predictions plt.figure(figsize=(12, 6)) sns.countplot( data=predictions_df, x="Real Genre", hue="RF Genre", order=genre_order, palette="Set3", ) plt.xlabel("Real Genre") plt.ylabel("Count") plt.title("Comparison of Predicted and Real Genres - RF") plt.legend(title="Predicted Genre - RF") plt.show() # Confusion Matrix for Neural Network (NN) - labels=genres keeps the matrix order aligned with the tick labels nn_cm = confusion_matrix(real_genres, [genre[0] for genre in predicted_genres], labels=genres) plt.figure(figsize=(8, 6)) sns.heatmap(nn_cm, annot=True, fmt="d", cmap="Blues") plt.title("Confusion Matrix - Neural Network") plt.xlabel("Predicted Labels") plt.ylabel("True Labels") plt.xticks(ticks=np.arange(len(genres)), labels=genres, rotation=45) plt.yticks(ticks=np.arange(len(genres)), labels=genres, rotation=0) plt.show() # Confusion Matrix for K-Nearest Neighbors (KNN) knn_cm =
confusion_matrix(real_genres, [genre[1] for genre in predicted_genres], labels=genres) plt.figure(figsize=(8, 6)) sns.heatmap(knn_cm, annot=True, fmt="d", cmap="Blues") plt.title("Confusion Matrix - K-Nearest Neighbors") plt.xlabel("Predicted Labels") plt.ylabel("True Labels") plt.xticks(ticks=np.arange(len(genres)), labels=genres, rotation=45) plt.yticks(ticks=np.arange(len(genres)), labels=genres, rotation=0) plt.show() # Confusion Matrix for Random Forest (RF) rf_cm = confusion_matrix(real_genres, [genre[2] for genre in predicted_genres], labels=genres) plt.figure(figsize=(8, 6)) sns.heatmap(rf_cm, annot=True, fmt="d", cmap="Blues") plt.title("Confusion Matrix - Random Forest") plt.xlabel("Predicted Labels") plt.ylabel("True Labels") plt.xticks(ticks=np.arange(len(genres)), labels=genres, rotation=45) plt.yticks(ticks=np.arange(len(genres)), labels=genres, rotation=0) plt.show()
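The count plots and confusion matrices in the script above show how predictions co-occur with real genres, but they never report a single accuracy figure for the sampled rows. A minimal sketch that summarises per-model accuracy from the `predictions_df` built in the notebook (assuming that DataFrame is still in scope; column names are the ones used above) might look like this:

from sklearn.metrics import accuracy_score

# predictions_df has one row per sampled track, with the three model
# predictions and the real genre, as constructed in the notebook above.
for col in ["NN Genre", "KNN Genre", "RF Genre"]:
    sampled_acc = accuracy_score(predictions_df["Real Genre"], predictions_df[col])
    print(f"{col}: {sampled_acc * 100:.1f}% correct on the sampled rows")

# Per-genre hit rate for the neural network, useful for spotting weak classes.
per_genre = (
    (predictions_df["NN Genre"] == predictions_df["Real Genre"])
    .groupby(predictions_df["Real Genre"])
    .mean()
    .sort_values(ascending=False)
)
print(per_genre)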
false
0
3,651
0
3,949
3,651
129901769
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) """Example of using PandasAI with a CSV file.""" from pandasai import PandasAI from pandasai.llm.openai import OpenAI df = pd.read_csv("/kaggle/input/imdb-data/IMDB-Movie-Data.csv") df.head() llm = OpenAI() pandas_ai = PandasAI(llm, verbose=True) response = pandas_ai.run(df, "Make a list which includes the genre types ?") print(response) # Expected output: a list of the distinct genre types found in the dataset.
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/901/129901769.ipynb
null
null
[{"Id": 129901769, "ScriptId": 38570211, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13452101, "CreationDate": "05/17/2023 10:04:41", "VersionNumber": 1.0, "Title": "PandasAI", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 27.0, "LinesInsertedFromPrevious": 27.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) """Example of using PandasAI with a CSV file.""" from pandasai import PandasAI from pandasai.llm.openai import OpenAI df = pd.read_csv("/kaggle/input/imdb-data/IMDB-Movie-Data.csv") df.head() llm = OpenAI() pandas_ai = PandasAI(llm, verbose=True) response = pandas_ai.run(df, "Make a list which includes the genre types ?") print(response) # Expected output: a list of the distinct genre types found in the dataset.
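Because PandasAI routes the prompt through an LLM, its answer is worth cross-checking against plain pandas. A small sketch of that check is below; it assumes the IMDB file has a comma-separated `Genre` column (as in the standard IMDB-Movie-Data.csv), which is an assumption about the data rather than something shown in the script above.

import pandas as pd

df = pd.read_csv("/kaggle/input/imdb-data/IMDB-Movie-Data.csv")

# Split comma-separated Genre strings (assumed format: "Action,Adventure,Sci-Fi")
# and collect the distinct genre types for comparison with the LLM answer.
genre_types = sorted(
    df["Genre"].dropna().str.split(",").explode().str.strip().unique()
)
print(genre_types)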
false
0
205
4
205
205
129901659
# tools used import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor model = RandomForestRegressor() heart = pd.read_csv("/kaggle/input/heart-disease-file/Heart_diease_dataset.csv") # Cleaning and transforming the data # 1. Split the data # 2. converting non-numerical values into numbers # 3. Checking if there are missing data and filling or deleting it # changing the Y into ints heart["HeartDisease"] = heart["HeartDisease"].replace({"Yes": 1, "No": 0}).astype(int) # the y is the dependent variable (whether or not heart disease is present) # the x is the independent variables, i.e. the features (excluding HeartDisease) x = heart.drop("HeartDisease", axis=1) y = heart["HeartDisease"] # Splitting up the dataset into the test set and train set, the test size is 20% x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2) x_train.shape, x_test.shape, y_train.shape, y_test.shape # Need to change all the categorical into numbers # Checking the data types of each category heart.dtypes from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer categorical_features = [ "Smoking", "AlcoholDrinking", "Stroke", "DiffWalking", "Sex", "AgeCategory", ] one_hot = OneHotEncoder() transformer = ColumnTransformer( [("one_hot", one_hot, categorical_features)], remainder="passthrough" ) transformed_x = transformer.fit_transform(x) transformed_x transformed_df = pd.DataFrame(transformed_x) transformed_df np.random.seed(0) x_train, x_test, y_train, y_test = train_test_split(transformed_x, y, test_size=0.2) model.fit(x_train, y_train)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/901/129901659.ipynb
null
null
[{"Id": 129901659, "ScriptId": 38631168, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13695340, "CreationDate": "05/17/2023 10:03:46", "VersionNumber": 1.0, "Title": "heartdiseaseproject", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 51.0, "LinesInsertedFromPrevious": 51.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# tools used import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor model = RandomForestRegressor() heart = pd.read_csv("/kaggle/input/heart-disease-file/Heart_diease_dataset.csv") # Cleaning and transforming the data # 1. Split the data # 2. converting non-numerical values into numbers # 3. Checking if there are missing data and filling or deleting it # changing the Y into ints heart["HeartDisease"] = heart["HeartDisease"].replace({"Yes": 1, "No": 0}).astype(int) # the y is the dependent variable (whether or not heart disease is present) # the x is the independent variables, i.e. the features (excluding HeartDisease) x = heart.drop("HeartDisease", axis=1) y = heart["HeartDisease"] # Splitting up the dataset into the test set and train set, the test size is 20% x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2) x_train.shape, x_test.shape, y_train.shape, y_test.shape # Need to change all the categorical into numbers # Checking the data types of each category heart.dtypes from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer categorical_features = [ "Smoking", "AlcoholDrinking", "Stroke", "DiffWalking", "Sex", "AgeCategory", ] one_hot = OneHotEncoder() transformer = ColumnTransformer( [("one_hot", one_hot, categorical_features)], remainder="passthrough" ) transformed_x = transformer.fit_transform(x) transformed_x transformed_df = pd.DataFrame(transformed_x) transformed_df np.random.seed(0) x_train, x_test, y_train, y_test = train_test_split(transformed_x, y, test_size=0.2) model.fit(x_train, y_train)
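The script above fits the model but never scores it on the held-out split, and because `HeartDisease` is a binary 0/1 target a classifier is arguably a more natural choice than `RandomForestRegressor`. A hedged sketch of both steps, reusing the `model`, `x_train`/`x_test` and `y_train`/`y_test` produced above (not the author's original cells), could look like this:

from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# Score the regressor fitted above; .score() reports R^2 on the hold-out split.
print("Regressor R^2 on test split:", model.score(x_test, y_test))

# Alternative: treat the 0/1 target as a classification problem.
clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print("Classifier accuracy on test split:", accuracy_score(y_test, y_pred))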
false
0
538
0
538
538
129976396
<jupyter_start><jupyter_text>Linear Regression E-commerce Dataset This dataset is having data of customers who buys clothes online. The store offers in-store style and clothing advice sessions. Customers come in to the store, have sessions/meetings with a personal stylist, then they can go home and order either on a mobile app or website for the clothes they want. The company is trying to decide whether to focus their efforts on their mobile app experience or their website. Kaggle dataset identifier: focusing-on-mobile-app-or-website <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, dataset file I/O (e.g. pd.read_csv) from sklearn.model_selection import train_test_split import seaborn as sns # to visualize random distributions import matplotlib.pyplot as plt from sklearn import preprocessing from sklearn.preprocessing import LabelEncoder from sklearn.metrics import r2_score from scipy.stats.mstats import winsorize from sklearn.impute import SimpleImputer # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/focusing-on-mobile-app-or-website/Ecommerce Customers") df.head() df.info() df.duplicated().sum() counts = df["Avatar"].value_counts() print(counts) # plot the counts using a bar chart with wider bars plt.bar(counts.index, counts.values, color="steelblue", width=0.5) # add axis labels and a title plt.xlabel("Avatar character") plt.ylabel("Count") plt.title("Number of occurrences of each Avatar character in the DataFrame") # rotate the x-axis labels by 45 degrees plt.xticks(rotation=45) # display the plot plt.show() numeric_cols = df.select_dtypes(include=["int64", "float64"]) # loop over the columns and create a plot for each of them for col in numeric_cols.columns: plt.hist(numeric_cols[col], bins=10) plt.xlabel(col) plt.ylabel("Count") plt.title("Histogram of {}".format(col)) plt.show() df = df.drop(columns=["Email", "Address"]) df["Avatar"] = df["Avatar"].astype("category").cat.codes df.head df = (df - df.mean()) / df.std() # rescaling for all data df.head(501) # create an overall outlier mask for the DataFrame outlier_mask = pd.Series([False] * len(df)) # iterate over the columns in the DataFrame and calculate the outlier mask for each column for col in df.columns: q1 = df[col].quantile(0.25) q3 = df[col].quantile(0.75) iqr = q3 - q1 col_outlier_mask = (df[col] < (q1 - 1.5 * iqr)) | (df[col] > (q3 + 1.5 * iqr)) outlier_mask |= col_outlier_mask # count the number of outlier rows num_outliers = outlier_mask.sum() print(num_outliers) df_no_outliers = df.loc[~outlier_mask] # overwrite the original DataFrame with the new DataFrame without the outlier rows df = df_no_outliers plt.figure(figsize=(8, 12)) heatmap = sns.heatmap( df.corr()[["Yearly Amount Spent"]].sort_values( by="Yearly Amount Spent", ascending=False ), vmin=-1, vmax=1, annot=True, cmap="BrBG", ) x = df.drop(columns=["Yearly Amount Spent", "Time on Website", "Avatar"]) y = df["Yearly Amount Spent"] x.info X_train, X_test, y_train, y_test 
= train_test_split( x, y, test_size=0.2, random_state=42 ) # Convert X and y to arrays and reshape y X_train = np.array(X_train) X_test = np.array(X_test) y_train = np.array(y_train).reshape(-1, 1) y_test = np.array(y_test).reshape(-1, 1) print(y_test.shape) # Add a column of ones to the input data for the intercept term X_train = np.concatenate([np.ones((X_train.shape[0], 1)), X_train], axis=1) X_test = np.concatenate([np.ones((X_test.shape[0], 1)), X_test], axis=1) # Initialize theta theta = np.zeros((X_train.shape[1], 1)) # Define the cost function def compute_cost(X, y, theta): m = len(y) J = (1 / (2 * m)) * np.sum(np.square(X.dot(theta) - y)) return J # Gradient descent algorithm def gradient_descent(X, y, theta, alpha, num_iters): m = len(y) J_history = np.zeros(num_iters) for i in range(num_iters): theta = theta - (alpha / m) * X.T.dot(X.dot(theta) - y) J_history[i] = compute_cost(X, y, theta) return theta, J_history alpha = 0.01 num_iters = 1000 theta, J_history = gradient_descent(X_train, y_train, theta, alpha, num_iters) # Make predictions on the test set y_pred = X_test.dot(theta) # Compute the R-squared score r_squared = r2_score(y_test, y_pred) print("R-squared:", r_squared)
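Since the manual gradient descent above can be sensitive to the learning rate and iteration count, one sanity check is to compare its R-squared against scikit-learn's closed-form LinearRegression on the same split. A minimal sketch, assuming `X_train`, `X_test`, `y_train`, `y_test` and `r_squared` are the objects prepared above (with the bias column already appended), is:

from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

# fit_intercept=False because a column of ones was already added to X_train/X_test.
lr = LinearRegression(fit_intercept=False)
lr.fit(X_train, y_train)
print("sklearn R-squared          :", r2_score(y_test, lr.predict(X_test)))
print("gradient-descent R-squared :", r_squared)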
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/976/129976396.ipynb
focusing-on-mobile-app-or-website
kolawale
[{"Id": 129976396, "ScriptId": 38663678, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10690376, "CreationDate": "05/17/2023 21:29:52", "VersionNumber": 1.0, "Title": "notebooka1982ed0b8", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 155.0, "LinesInsertedFromPrevious": 155.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186418720, "KernelVersionId": 129976396, "SourceDatasetVersionId": 688160}]
[{"Id": 688160, "DatasetId": 349053, "DatasourceVersionId": 708003, "CreatorUserId": 3676607, "LicenseName": "Unknown", "CreationDate": "09/16/2019 19:04:27", "VersionNumber": 1.0, "Title": "Linear Regression E-commerce Dataset", "Slug": "focusing-on-mobile-app-or-website", "Subtitle": "This Dataset consist of customer amount spent on online shopping of clothes", "Description": "This dataset is having data of customers who buys clothes online. The store offers in-store style and clothing advice sessions. Customers come in to the store, have sessions/meetings with a personal stylist, then they can go home and order either on a mobile app or website for the clothes they want.\n\nThe company is trying to decide whether to focus their efforts on their mobile app experience or their website.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 349053, "CreatorUserId": 3676607, "OwnerUserId": 3676607.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 688160.0, "CurrentDatasourceVersionId": 708003.0, "ForumId": 360716, "Type": 2, "CreationDate": "09/16/2019 19:04:27", "LastActivityDate": "09/16/2019", "TotalViews": 55058, "TotalDownloads": 8340, "TotalVotes": 60, "TotalKernels": 48}]
[{"Id": 3676607, "UserName": "kolawale", "DisplayName": "Saurabh Kolawale", "RegisterDate": "09/07/2019", "PerformanceTier": 0}]
import numpy as np # linear algebra import pandas as pd # data processing, dataset file I/O (e.g. pd.read_csv) from sklearn.model_selection import train_test_split import seaborn as sns # to visualize random distributions import matplotlib.pyplot as plt from sklearn import preprocessing from sklearn.preprocessing import LabelEncoder from sklearn.metrics import r2_score from scipy.stats.mstats import winsorize from sklearn.impute import SimpleImputer # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/focusing-on-mobile-app-or-website/Ecommerce Customers") df.head() df.info() df.duplicated().sum() counts = df["Avatar"].value_counts() print(counts) # plot the counts using a bar chart with wider bars plt.bar(counts.index, counts.values, color="steelblue", width=0.5) # add axis labels and a title plt.xlabel("Avatar character") plt.ylabel("Count") plt.title("Number of occurrences of each Avatar character in the DataFrame") # rotate the x-axis labels by 45 degrees plt.xticks(rotation=45) # display the plot plt.show() numeric_cols = df.select_dtypes(include=["int64", "float64"]) # loop over the columns and create a plot for each of them for col in numeric_cols.columns: plt.hist(numeric_cols[col], bins=10) plt.xlabel(col) plt.ylabel("Count") plt.title("Histogram of {}".format(col)) plt.show() df = df.drop(columns=["Email", "Address"]) df["Avatar"] = df["Avatar"].astype("category").cat.codes df.head df = (df - df.mean()) / df.std() # rescaling for all data df.head(501) # create an overall outlier mask for the DataFrame outlier_mask = pd.Series([False] * len(df)) # iterate over the columns in the DataFrame and calculate the outlier mask for each column for col in df.columns: q1 = df[col].quantile(0.25) q3 = df[col].quantile(0.75) iqr = q3 - q1 col_outlier_mask = (df[col] < (q1 - 1.5 * iqr)) | (df[col] > (q3 + 1.5 * iqr)) outlier_mask |= col_outlier_mask # count the number of outlier rows num_outliers = outlier_mask.sum() print(num_outliers) df_no_outliers = df.loc[~outlier_mask] # overwrite the original DataFrame with the new DataFrame without the outlier rows df = df_no_outliers plt.figure(figsize=(8, 12)) heatmap = sns.heatmap( df.corr()[["Yearly Amount Spent"]].sort_values( by="Yearly Amount Spent", ascending=False ), vmin=-1, vmax=1, annot=True, cmap="BrBG", ) x = df.drop(columns=["Yearly Amount Spent", "Time on Website", "Avatar"]) y = df["Yearly Amount Spent"] x.info X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # Convert X and y to arrays and reshape y X_train = np.array(X_train) X_test = np.array(X_test) y_train = np.array(y_train).reshape(-1, 1) y_test = np.array(y_test).reshape(-1, 1) print(y_test.shape) # Add a column of ones to the input data for the intercept term X_train = np.concatenate([np.ones((X_train.shape[0], 1)), X_train], axis=1) X_test = np.concatenate([np.ones((X_test.shape[0], 1)), X_test], axis=1) # Initialize theta theta = np.zeros((X_train.shape[1], 1)) # Define the cost 
function def compute_cost(X, y, theta): m = len(y) J = (1 / (2 * m)) * np.sum(np.square(X.dot(theta) - y)) return J # Gradient descent algorithm def gradient_descent(X, y, theta, alpha, num_iters): m = len(y) J_history = np.zeros(num_iters) for i in range(num_iters): theta = theta - (alpha / m) * X.T.dot(X.dot(theta) - y) J_history[i] = compute_cost(X, y, theta) return theta, J_history alpha = 0.01 num_iters = 1000 theta, J_history = gradient_descent(X_train, y_train, theta, alpha, num_iters) # Make predictions on the test set y_pred = X_test.dot(theta) # Compute the R-squared score r_squared = r2_score(y_test, y_pred) print("R-squared:", r_squared)
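It can also help to confirm that gradient descent actually converged before trusting `theta`; plotting the recorded cost history is the usual check. A short sketch using the `J_history` array returned above (assuming it is still in scope):

import matplotlib.pyplot as plt

plt.figure(figsize=(8, 4))
plt.plot(J_history)  # cost recorded at every iteration of gradient descent
plt.xlabel("Iteration")
plt.ylabel("Cost J(theta)")
plt.title("Gradient descent convergence")
plt.show()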
false
0
1,430
0
1,553
1,430
129976937
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import r2_score from sklearn.linear_model import LinearRegression from sklearn.preprocessing import LabelEncoder from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import ( mean_squared_error, r2_score, mean_absolute_error, accuracy_score, ) from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split train_data = pd.read_csv("/kaggle/input/car-price-prediction-ru/train.csv") train_data.head(10) train_data.isnull().sum() train_data.columns train_data = train_data[["mileage", "car_id", "brand", "transmission", "price"]] train_data.mileage # Assuming your train data is stored in a pandas DataFrame called 'train_data' # and the column containing the mileage data is named 'mileage' mileage_series = train_data["mileage"] mileage_series = mileage_series.astype(str).str.replace("[^0-9]", "", regex=True) mileage_series = pd.to_numeric(mileage_series, errors="coerce") print(mileage_series) # Calculate the average of non-NaN values average_value = mileage_series.mean() # Fill NaN values with the average value mileage_series = mileage_series.fillna(average_value) mileage_series = mileage_series.astype("int64") print(mileage_series) train_data["mileage"] = mileage_series train_data train_data.car_id # Assuming your 'car_id' column is stored in a pandas Series called 'car_id_series' car_id_series = train_data["car_id"] # Convert 'car_id' to a sequential basis sequential_car_id = car_id_series.rank(method="dense") print(sequential_car_id) train_data["car_id"] = sequential_car_id label_encoder = LabelEncoder() train_data["brand"] = label_encoder.fit_transform(train_data["brand"]) label_encoder = LabelEncoder() train_data["transmission"] = label_encoder.fit_transform(train_data["transmission"]) train_data test_data = pd.read_csv("/kaggle/input/car-price-prediction-ru/test.csv") test_data.head() test_data = test_data[["car_id", "mileage", "transmission", "brand"]] train_data.isnull().sum() # Assuming your test data is stored in a pandas DataFrame called 'test_data' # and the column containing the mileage data is named 'mileage' # I named it milage_seri mileage_seri = test_data["mileage"] mileage_seri = mileage_seri.astype(str).str.replace("[^0-9]", "", regex=True) mileage_seri = pd.to_numeric(mileage_seri, errors="coerce") print(mileage_seri) # Calculate the average of non-NaN values average_value = mileage_seri.mean() # Fill NaN values with the average value mileage_seri = mileage_seri.fillna(average_value) mileage_seri = mileage_seri.astype("int64") print(mileage_seri) test_data["mileage"] = mileage_seri label_encoder = LabelEncoder() test_data["transmission"] = label_encoder.fit_transform(test_data["transmission"]) label_encoder = LabelEncoder() 
test_data["brand"] = label_encoder.fit_transform(test_data["brand"]) test_data X_train = train_data.drop(columns=["price"]) y_train = train_data["price"] n_estimators = 100 max_depth = 5 min_samples_split = 10 rf = RandomForestClassifier( n_estimators=n_estimators, max_depth=max_depth, min_samples_split=min_samples_split ) rf.fit(X_train, y_train) # Reorder the test_data columns to match the order of X_train columns test_data = test_data[X_train.columns] predictions = rf.predict(test_data) X_train_split, X_val_split, y_train_split, y_val_split = train_test_split( X_train, y_train, test_size=0.2, random_state=42 ) n_estimators = 100 max_depth = 5 min_samples_split = 10 rf = RandomForestClassifier( n_estimators=n_estimators, max_depth=max_depth, min_samples_split=min_samples_split ) rf.fit(X_train_split, y_train_split) val_predictions = rf.predict(X_val_split) # Calculate the mean squared error (MSE) mse = mean_squared_error(y_val_split, val_predictions) # Take the natural logarithm of the MSE ln_mse = np.log(mse) # Calculate the mean of the squared logarithmic errors mean_squared_ln_error = np.mean(ln_mse) # Take the square root of the mean squared logarithmic error to get RLMSE rlmse = np.sqrt(mean_squared_ln_error) print("RLMSE:", rlmse) def calculate_rlmse(true_values, predicted_values): # Calculate the mean squared error (MSE) mse = np.mean((y_val_split, val_predictions) ** 2) # Take the natural logarithm of the MSE ln_mse = np.log(mse) # Calculate the mean of the squared logarithmic errors mean_squared_ln_error = np.mean(ln_mse) # Take the square root of the mean squared logarithmic error to get RLMSE rlmse = np.sqrt(mean_squared_ln_error) return rlmse accuracy = accuracy_score(y_val_split, val_predictions) print("Validation Accuracy:", accuracy) # submission = pd.DataFrame({"car_id": test_data["car_id"], "predicted_price": predictions}) # submission.to_csv("submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/976/129976937.ipynb
null
null
[{"Id": 129976937, "ScriptId": 38614729, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14547994, "CreationDate": "05/17/2023 21:37:34", "VersionNumber": 3.0, "Title": "Car Price Prediction | Easy and Simple \ud83d\udd25\ud83d\udd25", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 180.0, "LinesInsertedFromPrevious": 55.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 125.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import r2_score from sklearn.linear_model import LinearRegression from sklearn.preprocessing import LabelEncoder from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import ( mean_squared_error, r2_score, mean_absolute_error, accuracy_score, ) from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split train_data = pd.read_csv("/kaggle/input/car-price-prediction-ru/train.csv") train_data.head(10) train_data.isnull().sum() train_data.columns train_data = train_data[["mileage", "car_id", "brand", "transmission", "price"]] train_data.mileage # Assuming your train data is stored in a pandas DataFrame called 'train_data' # and the column containing the mileage data is named 'mileage' mileage_series = train_data["mileage"] mileage_series = mileage_series.astype(str).str.replace("[^0-9]", "", regex=True) mileage_series = pd.to_numeric(mileage_series, errors="coerce") print(mileage_series) # Calculate the average of non-NaN values average_value = mileage_series.mean() # Fill NaN values with the average value mileage_series = mileage_series.fillna(average_value) mileage_series = mileage_series.astype("int64") print(mileage_series) train_data["mileage"] = mileage_series train_data train_data.car_id # Assuming your 'car_id' column is stored in a pandas Series called 'car_id_series' car_id_series = train_data["car_id"] # Convert 'car_id' to a sequential basis sequential_car_id = car_id_series.rank(method="dense") print(sequential_car_id) train_data["car_id"] = sequential_car_id label_encoder = LabelEncoder() train_data["brand"] = label_encoder.fit_transform(train_data["brand"]) label_encoder = LabelEncoder() train_data["transmission"] = label_encoder.fit_transform(train_data["transmission"]) train_data test_data = pd.read_csv("/kaggle/input/car-price-prediction-ru/test.csv") test_data.head() test_data = test_data[["car_id", "mileage", "transmission", "brand"]] train_data.isnull().sum() # Assuming your test data is stored in a pandas DataFrame called 'test_data' # and the column containing the mileage data is named 'mileage' # I named it milage_seri mileage_seri = test_data["mileage"] mileage_seri = mileage_seri.astype(str).str.replace("[^0-9]", "", regex=True) mileage_seri = pd.to_numeric(mileage_seri, errors="coerce") print(mileage_seri) # Calculate the average of non-NaN values average_value = mileage_seri.mean() # Fill NaN values with the average value mileage_seri = mileage_seri.fillna(average_value) mileage_seri = mileage_seri.astype("int64") print(mileage_seri) test_data["mileage"] = mileage_seri label_encoder = LabelEncoder() test_data["transmission"] = label_encoder.fit_transform(test_data["transmission"]) label_encoder = LabelEncoder() 
test_data["brand"] = label_encoder.fit_transform(test_data["brand"]) test_data X_train = train_data.drop(columns=["price"]) y_train = train_data["price"] n_estimators = 100 max_depth = 5 min_samples_split = 10 rf = RandomForestClassifier( n_estimators=n_estimators, max_depth=max_depth, min_samples_split=min_samples_split ) rf.fit(X_train, y_train) # Reorder the test_data columns to match the order of X_train columns test_data = test_data[X_train.columns] predictions = rf.predict(test_data) X_train_split, X_val_split, y_train_split, y_val_split = train_test_split( X_train, y_train, test_size=0.2, random_state=42 ) n_estimators = 100 max_depth = 5 min_samples_split = 10 rf = RandomForestClassifier( n_estimators=n_estimators, max_depth=max_depth, min_samples_split=min_samples_split ) rf.fit(X_train_split, y_train_split) val_predictions = rf.predict(X_val_split) # Calculate the mean squared error (MSE) mse = mean_squared_error(y_val_split, val_predictions) # Take the natural logarithm of the MSE ln_mse = np.log(mse) # Calculate the mean of the squared logarithmic errors mean_squared_ln_error = np.mean(ln_mse) # Take the square root of the mean squared logarithmic error to get RLMSE rlmse = np.sqrt(mean_squared_ln_error) print("RLMSE:", rlmse) def calculate_rlmse(true_values, predicted_values): # Calculate the mean squared error (MSE) mse = np.mean((y_val_split, val_predictions) ** 2) # Take the natural logarithm of the MSE ln_mse = np.log(mse) # Calculate the mean of the squared logarithmic errors mean_squared_ln_error = np.mean(ln_mse) # Take the square root of the mean squared logarithmic error to get RLMSE rlmse = np.sqrt(mean_squared_ln_error) return rlmse accuracy = accuracy_score(y_val_split, val_predictions) print("Validation Accuracy:", accuracy) # submission = pd.DataFrame({"car_id": test_data["car_id"], "predicted_price": predictions}) # submission.to_csv("submission.csv", index=False)
false
0
1,765
0
1,765
1,765
129976927
<jupyter_start><jupyter_text>🎹 Spotify Tracks Dataset # Content This is a dataset of Spotify tracks over a range of **125** different genres. Each track has some audio features associated with it. The data is in `CSV` format which is tabular and can be loaded quickly. # Usage The dataset can be used for: - Building a **Recommendation System** based on some user input or preference - **Classification** purposes based on audio features and available genres - Any other application that you can think of. Feel free to discuss! # Column Description - **track_id**: The Spotify ID for the track - **artists**: The artists' names who performed the track. If there is more than one artist, they are separated by a `;` - **album_name**: The album name in which the track appears - **track_name**: Name of the track - **popularity**: **The popularity of a track is a value between 0 and 100, with 100 being the most popular**. The popularity is calculated by algorithm and is based, in the most part, on the total number of plays the track has had and how recent those plays are. Generally speaking, songs that are being played a lot now will have a higher popularity than songs that were played a lot in the past. Duplicate tracks (e.g. the same track from a single and an album) are rated independently. Artist and album popularity is derived mathematically from track popularity. - **duration_ms**: The track length in milliseconds - **explicit**: Whether or not the track has explicit lyrics (true = yes it does; false = no it does not OR unknown) - **danceability**: Danceability describes how suitable a track is for dancing based on a combination of musical elements including tempo, rhythm stability, beat strength, and overall regularity. A value of 0.0 is least danceable and 1.0 is most danceable - **energy**: Energy is a measure from 0.0 to 1.0 and represents a perceptual measure of intensity and activity. Typically, energetic tracks feel fast, loud, and noisy. For example, death metal has high energy, while a Bach prelude scores low on the scale - **key**: The key the track is in. Integers map to pitches using standard Pitch Class notation. E.g. `0 = C`, `1 = C♯/D♭`, `2 = D`, and so on. If no key was detected, the value is -1 - **loudness**: The overall loudness of a track in decibels (dB) - **mode**: Mode indicates the modality (major or minor) of a track, the type of scale from which its melodic content is derived. Major is represented by 1 and minor is 0 - **speechiness**: Speechiness detects the presence of spoken words in a track. The more exclusively speech-like the recording (e.g. talk show, audio book, poetry), the closer to 1.0 the attribute value. Values above 0.66 describe tracks that are probably made entirely of spoken words. Values between 0.33 and 0.66 describe tracks that may contain both music and speech, either in sections or layered, including such cases as rap music. Values below 0.33 most likely represent music and other non-speech-like tracks - **acousticness**: A confidence measure from 0.0 to 1.0 of whether the track is acoustic. 1.0 represents high confidence the track is acoustic - **instrumentalness**: Predicts whether a track contains no vocals. "Ooh" and "aah" sounds are treated as instrumental in this context. Rap or spoken word tracks are clearly "vocal". The closer the instrumentalness value is to 1.0, the greater likelihood the track contains no vocal content - **liveness**: Detects the presence of an audience in the recording. 
Higher liveness values represent an increased probability that the track was performed live. A value above 0.8 provides strong likelihood that the track is live - **valence**: A measure from 0.0 to 1.0 describing the musical positiveness conveyed by a track. Tracks with high valence sound more positive (e.g. happy, cheerful, euphoric), while tracks with low valence sound more negative (e.g. sad, depressed, angry) - **tempo**: The overall estimated tempo of a track in beats per minute (BPM). In musical terminology, tempo is the speed or pace of a given piece and derives directly from the average beat duration - **time_signature**: An estimated time signature. The time signature (meter) is a notational convention to specify how many beats are in each bar (or measure). The time signature ranges from 3 to 7 indicating time signatures of `3/4`, to `7/4`. - **track_genre**: The genre in which the track belongs # Acknowledgement Image credits: [BPR world](https://www.bprworld.com/news/spotify-vs-radio-the-battle-continues/) Kaggle dataset identifier: spotify-tracks-dataset <jupyter_script># # **Work in progress!** import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt spt_music = pd.read_csv("/kaggle/input/-spotify-tracks-dataset/dataset.csv") spt_music.info() spt_music.head() spt_music.describe() spt_music_a = spt_music.sort_values("popularity", ascending=False) spt_music_a.head(10) spt_music_clean = spt_music.drop_duplicates( subset="track_id", keep="first", inplace=False ) spt_music_clean_popul = spt_music_clean.sort_values("popularity", ascending=False) spt_music_clean_popul spt_music_obj = spt_music_clean[ [ "popularity", "track_name", "artists", "album_name", "duration_ms", "tempo", "time_signature", "key", "mode", "loudness", "explicit", ] ] popul_obj = spt_music_obj.sort_values("popularity", ascending=False) popul_obj popul_obj.head(30) spt_music_subj = spt_music_clean[ [ "popularity", "track_name", "artists", "album_name", "danceability", "energy", "instrumentalness", "valence", "acousticness", "liveness", ] ] popul_subj = spt_music_subj.sort_values("popularity", ascending=False) popul_subj popul_subj.head(30) # ## Remember!, Work in progress!! 💻 + 🐍 art_popul = ( popul_obj.groupby(["artists", "explicit"])["popularity"] .mean() .sort_values(ascending=False) .head(25) ) art_popul mean_tempo = popul_obj["tempo"].mean() mean_tempo
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/976/129976927.ipynb
spotify-tracks-dataset
maharshipandya
[{"Id": 129976927, "ScriptId": 37797898, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10738943, "CreationDate": "05/17/2023 21:37:27", "VersionNumber": 12.0, "Title": "Quantifying_Music", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 51.0, "LinesInsertedFromPrevious": 16.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 35.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186419425, "KernelVersionId": 129976927, "SourceDatasetVersionId": 4372070}]
[{"Id": 4372070, "DatasetId": 2570056, "DatasourceVersionId": 4430808, "CreatorUserId": 7899307, "LicenseName": "Database: Open Database, Contents: \u00a9 Original Authors", "CreationDate": "10/22/2022 14:40:15", "VersionNumber": 1.0, "Title": "\ud83c\udfb9 Spotify Tracks Dataset", "Slug": "spotify-tracks-dataset", "Subtitle": "A dataset of Spotify songs with different genres and their audio features", "Description": "# Content\n\nThis is a dataset of Spotify tracks over a range of **125** different genres. Each track has some audio features associated with it. The data is in `CSV` format which is tabular and can be loaded quickly.\n\n# Usage\n\nThe dataset can be used for:\n\n- Building a **Recommendation System** based on some user input or preference\n- **Classification** purposes based on audio features and available genres\n- Any other application that you can think of. Feel free to discuss!\n\n# Column Description\n\n- **track_id**: The Spotify ID for the track\n- **artists**: The artists' names who performed the track. If there is more than one artist, they are separated by a `;`\n- **album_name**: The album name in which the track appears\n- **track_name**: Name of the track\n- **popularity**: **The popularity of a track is a value between 0 and 100, with 100 being the most popular**. The popularity is calculated by algorithm and is based, in the most part, on the total number of plays the track has had and how recent those plays are. Generally speaking, songs that are being played a lot now will have a higher popularity than songs that were played a lot in the past. Duplicate tracks (e.g. the same track from a single and an album) are rated independently. Artist and album popularity is derived mathematically from track popularity.\n- **duration_ms**: The track length in milliseconds\n- **explicit**: Whether or not the track has explicit lyrics (true = yes it does; false = no it does not OR unknown)\n- **danceability**: Danceability describes how suitable a track is for dancing based on a combination of musical elements including tempo, rhythm stability, beat strength, and overall regularity. A value of 0.0 is least danceable and 1.0 is most danceable\n- **energy**: Energy is a measure from 0.0 to 1.0 and represents a perceptual measure of intensity and activity. Typically, energetic tracks feel fast, loud, and noisy. For example, death metal has high energy, while a Bach prelude scores low on the scale\n- **key**: The key the track is in. Integers map to pitches using standard Pitch Class notation. E.g. `0 = C`, `1 = C\u266f/D\u266d`, `2 = D`, and so on. If no key was detected, the value is -1\n- **loudness**: The overall loudness of a track in decibels (dB)\n- **mode**: Mode indicates the modality (major or minor) of a track, the type of scale from which its melodic content is derived. Major is represented by 1 and minor is 0\n- **speechiness**: Speechiness detects the presence of spoken words in a track. The more exclusively speech-like the recording (e.g. talk show, audio book, poetry), the closer to 1.0 the attribute value. Values above 0.66 describe tracks that are probably made entirely of spoken words. Values between 0.33 and 0.66 describe tracks that may contain both music and speech, either in sections or layered, including such cases as rap music. Values below 0.33 most likely represent music and other non-speech-like tracks\n- **acousticness**: A confidence measure from 0.0 to 1.0 of whether the track is acoustic. 
1.0 represents high confidence the track is acoustic\n- **instrumentalness**: Predicts whether a track contains no vocals. \"Ooh\" and \"aah\" sounds are treated as instrumental in this context. Rap or spoken word tracks are clearly \"vocal\". The closer the instrumentalness value is to 1.0, the greater likelihood the track contains no vocal content\n- **liveness**: Detects the presence of an audience in the recording. Higher liveness values represent an increased probability that the track was performed live. A value above 0.8 provides strong likelihood that the track is live\n- **valence**: A measure from 0.0 to 1.0 describing the musical positiveness conveyed by a track. Tracks with high valence sound more positive (e.g. happy, cheerful, euphoric), while tracks with low valence sound more negative (e.g. sad, depressed, angry)\n- **tempo**: The overall estimated tempo of a track in beats per minute (BPM). In musical terminology, tempo is the speed or pace of a given piece and derives directly from the average beat duration\n- **time_signature**: An estimated time signature. The time signature (meter) is a notational convention to specify how many beats are in each bar (or measure). The time signature ranges from 3 to 7 indicating time signatures of `3/4`, to `7/4`.\n- **track_genre**: The genre in which the track belongs\n\n# Acknowledgement\n\nImage credits: [BPR world](https://www.bprworld.com/news/spotify-vs-radio-the-battle-continues/)", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2570056, "CreatorUserId": 7899307, "OwnerUserId": 7899307.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4372070.0, "CurrentDatasourceVersionId": 4430808.0, "ForumId": 2599627, "Type": 2, "CreationDate": "10/22/2022 14:40:15", "LastActivityDate": "10/22/2022", "TotalViews": 74784, "TotalDownloads": 11612, "TotalVotes": 194, "TotalKernels": 29}]
[{"Id": 7899307, "UserName": "maharshipandya", "DisplayName": "MaharshiPandya", "RegisterDate": "07/14/2021", "PerformanceTier": 2}]
# # **Work in progress!** import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt spt_music = pd.read_csv("/kaggle/input/-spotify-tracks-dataset/dataset.csv") spt_music.info() spt_music.head() spt_music.describe() spt_music_a = spt_music.sort_values("popularity", ascending=False) spt_music_a.head(10) spt_music_clean = spt_music.drop_duplicates( subset="track_id", keep="first", inplace=False ) spt_music_clean_popul = spt_music_clean.sort_values("popularity", ascending=False) spt_music_clean_popul spt_music_obj = spt_music_clean[ [ "popularity", "track_name", "artists", "album_name", "duration_ms", "tempo", "time_signature", "key", "mode", "loudness", "explicit", ] ] popul_obj = spt_music_obj.sort_values("popularity", ascending=False) popul_obj popul_obj.head(30) spt_music_subj = spt_music_clean[ [ "popularity", "track_name", "artists", "album_name", "danceability", "energy", "instrumentalness", "valence", "acousticness", "liveness", ] ] popul_subj = spt_music_subj.sort_values("popularity", ascending=False) popul_subj popul_subj.head(30) # ## Remember!, Work in progress!! 💻 + 🐍 art_popul = ( popul_obj.groupby(["artists", "explicit"])["popularity"] .mean() .sort_values(ascending=False) .head(25) ) art_popul mean_tempo = popul_obj["tempo"].mean() mean_tempo
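Since the dataset description above emphasises per-genre audio features, one natural next step for this work-in-progress analysis is a genre-level aggregation. A small sketch, assuming the deduplicated `spt_music_clean` frame from the script above is still in scope and keeps the `track_genre` column:

# Mean popularity and a few audio features per genre, using the deduplicated frame.
genre_profile = (
    spt_music_clean
    .groupby("track_genre")[["popularity", "danceability", "energy", "tempo"]]
    .mean()
    .sort_values("popularity", ascending=False)
)
print(genre_profile.head(10))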
false
1
533
0
1,726
533
129976621
<jupyter_start><jupyter_text>credit_risk_customers This dataset consists of 20 features of the customers. It could be used to predict if the customer could be given credit. Many features require data cleaning. This is a great dataset for practicing data cleaning and feature engineering and building a binary classification model. Kaggle dataset identifier: credit-risk-customers <jupyter_script># # Credit Risk of Customers # The dataset consists of 20 features and a class. # It could be used to predict if the customer could be given credit. # Many features require data cleaning. # This is a great dataset for practicing data cleaning and feature engineering and building a binary classification model. import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import re from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder, MinMaxScaler from sklearn.model_selection import GridSearchCV from sklearn.metrics import ( classification_report, accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, ) from imblearn.over_sampling import RandomOverSampler from imblearn.over_sampling import SMOTE from xgboost import XGBClassifier from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression # # Preparing, Cleaning and Analyzing the Dataset # Importing the dataset df = pd.read_csv("/kaggle/input/credit-risk-customers/credit_customers.csv") df.head().T # Check if there is null values in any column df.info() print(df["class"].value_counts()) # Getting information about the dataset df.describe().T df.dtypes # loop through each column in the dataframe and count unique values for column in df.columns: unique_values = df[column].nunique() print(f"{column}: {unique_values}") plt.figure(figsize=(10, 50)) c = [ "tab:blue" ] # , 'tab:orange', 'tab:green', 'tab:purple', 'tab:red', 'tab:cyan', 'tab:gray', 'tab:brown', 'tab:olive'] category_list = df.select_dtypes(include="object").columns """ category_list=[] # iterating the columns for getting categories for item in df.columns: if df[item].dtypes=='O': #add to list only objects category_list.append(item) """ x = 0 for i in category_list: x += 1 plt.subplot(11, 2, x) plt.xticks(rotation=90) plt.gca().set_title(i) num_category = df[i].nunique() # get number of categories each column df[i].value_counts().plot( kind="bar", color=c[0:num_category], alpha=1.0, grid=False ) if x == 1 or x == 2: plt.tight_layout() new_df = df.copy() new_df.info() sns.set_theme(style="whitegrid", palette="pastel") plt.figure(figsize=(10, 50)) category_list = df.select_dtypes(include="object").columns """ # iterating the columns for getting categories category_list=[] for item in df.columns: if item=='class' or item=='age' or item=='duration' or item=='credit_amount' or item=='installment_commitment'or item=='residence_since'or item=='existing_credits': category_list.append(item) else: category_list.append(item) """ x = 0 for i in category_list: x += 1 plt.subplot(11, 2, x) plt.xticks(rotation=90) # plt.gca().set_title(i) sns.countplot(x=i, hue="class", data=df) if x == 1 or x == 2: plt.tight_layout() def Label_Encoder(df): object_cols = df.select_dtypes(include="object").columns for col in object_cols: df[col] = LabelEncoder().fit_transform(df[col]) return df 
Label_Encoder(df) df.info() # Correlation matrix fig = plt.gcf() fig.set_size_inches(12, 8) plt.title("Correlation Between The Dimensions") a = sns.heatmap( df.corr(), annot=True, annot_kws={"size": 10}, fmt=".2f", linewidths=0.2 ) a.set_xticklabels(a.get_xticklabels(), rotation=90) a.set_yticklabels(a.get_yticklabels(), rotation=0) plt.show() # df['class'] = LabelEncoder().fit_transform(df['class']) plt.figure(figsize=(10, 5), dpi=100) heatmap = sns.heatmap( df.corr()[["class"]].sort_values(by="class", ascending=False), vmin=-1, vmax=1, annot=True, ) heatmap.set_title("Features Correlating with class", fontdict={"fontsize": 10}, pad=10) df = new_df.copy() df.info() # # Feature Selection and Extraction, and Reducing Dimension # define a function to drop words from feature def drop_words(s, words): for word in words: s = s.replace(word, "") return s.strip() # create a new column df["employment_new"] = "employed" # define a regular expression pattern gender_pattern = re.compile(r"(unemployed)", flags=re.IGNORECASE) # loop through each row in the dataframe for index, row in df.iterrows(): match = gender_pattern.search(row["employment"]) if match: df.at[index, "employment_new"] = match.group() row["employment"] = gender_pattern.sub("", row["employment"]) # drop the words words_to_drop = [">", "<", "=", "unemployed"] df["employment"] = df["employment"].apply(lambda x: drop_words(x, words_to_drop)) # df.drop('personal_status', inplace=True, axis=1) del df["employment"] # create a new column df["gender"] = "Unknown" # define a regular expression pattern gender_pattern = re.compile(r"(male|female)", flags=re.IGNORECASE) # loop through each row in the dataframe for index, row in df.iterrows(): match = gender_pattern.search(row["personal_status"]) if match: df.at[index, "gender"] = match.group() row["personal_status"] = gender_pattern.sub("", row["personal_status"]) # drop the words words_to_drop = ["male", "female", "fe"] df["personal_status"] = df["personal_status"].apply( lambda x: drop_words(x, words_to_drop) ) # df.drop('personal_status', inplace=True, axis=1) del df["personal_status"] # create a new column df["job_new"] = "high_qualified" # define a regular expression pattern job_pattern = re.compile(r"(skilled|unskilled)", flags=re.IGNORECASE) # loop through each row in the dataframe for index, row in df.iterrows(): match = job_pattern.search(row["job"]) if match: df.at[index, "job_new"] = match.group() row["job"] = job_pattern.sub("", row["job"]) # drop the words words_to_drop = ["skilled", "unskilled"] df["job"] = df["job"].apply(lambda x: drop_words(x, words_to_drop)) # df.drop('personal_status', inplace=True, axis=1) del df["job"] # create a new column df["credit_history_new"] = "existing_paid" # define a regular expression pattern credit_pattern = re.compile(r"(all paid|delayed previously)", flags=re.IGNORECASE) # loop through each row in the dataframe for index, row in df.iterrows(): match = credit_pattern.search(row["credit_history"]) if match: df.at[index, "credit_history_new"] = match.group() row["credit_history"] = credit_pattern.sub("", row["credit_history"]) # drop the words words_to_drop = ["all paid", "delayed previously"] df["credit_history"] = df["credit_history"].apply( lambda x: drop_words(x, words_to_drop) ) # df.drop('personal_status', inplace=True, axis=1) del df["credit_history"] # create a new column df["housing_new"] = "rent" # define a regular expression pattern gender_pattern = re.compile(r"(own)", flags=re.IGNORECASE) # loop through each row in the dataframe for 
index, row in df.iterrows(): match = gender_pattern.search(row["housing"]) if match: df.at[index, "housing_new"] = match.group() row["housing"] = gender_pattern.sub("", row["housing"]) # drop the words words_to_drop = ["rent", "own", "free"] df["housing"] = df["housing"].apply(lambda x: drop_words(x, words_to_drop)) # df.drop('personal_status', inplace=True, axis=1) del df["housing"] del df["foreign_worker"] del df["own_telephone"] del df["num_dependents"] del df["residence_since"] del df["other_parties"] del df["installment_commitment"] del df["property_magnitude"] del df["other_payment_plans"] del df["existing_credits"] del df["credit_amount"] # Age Group bins = [0, 30, 40, 50, 60, 70, 120] # Define age groups labels = ["0-30", "31-40", "41-50", "51-60", "61-70", "70+"] df["age_group"] = pd.cut(df["age"], bins=bins, labels=labels, include_lowest=True) df["age_group"] = df["age_group"].astype(object) del df["age"] # Duration Group bins = [0, 12, 24, 36, 48, 60, 72] # Define duration groups labels = ["0-12", "13-24", "25-36", "37-48", "49-60", "61-72"] df["duration_group"] = pd.cut( df["duration"], bins=bins, labels=labels, include_lowest=True ) df["duration_group"] = df["duration_group"].astype(object) del df["duration"] new_df = df.copy() new_df.info() sns.set_theme(style="whitegrid", palette="pastel") plt.figure(figsize=(10, 50)) category_list = df.select_dtypes(include="object").columns """ # iterating the columns for getting categories category_list=[] for item in df.columns: if item=='class' or item=='age' or item=='duration' or item=='credit_amount' or item=='installment_commitment'or item=='residence_since'or item=='existing_credits': category_list.append(item) else: category_list.append(item) """ x = 0 for i in category_list: x += 1 plt.subplot(11, 2, x) plt.xticks(rotation=90) # plt.gca().set_title(i) sns.countplot(x=i, hue="class", data=df) if x == 1 or x == 2: plt.tight_layout() df.info() Label_Encoder(df) df.info() # Correlation matrix fig = plt.gcf() fig.set_size_inches(15, 10) plt.title("Correlation Between The Dimensions") a = sns.heatmap( df.corr(), annot=True, annot_kws={"size": 10}, fmt=".2f", linewidths=0.2 ) a.set_xticklabels(a.get_xticklabels(), rotation=90) a.set_yticklabels(a.get_yticklabels(), rotation=0) plt.show() df.info() # price_range-class correlations # df['class'] = LabelEncoder().fit_transform(df['class']) plt.figure(figsize=(10, 5), dpi=100) heatmap = sns.heatmap( df.corr()[["class"]].sort_values(by="class", ascending=False), vmin=-1, vmax=1, annot=True, ) heatmap.set_title("Features Correlating with class", fontdict={"fontsize": 10}, pad=10) df = new_df.copy() df.info() # del df['age_group'] # del df['gender'] del df["job_new"] del df["employment_new"] df.info() # # Model Building and Configuration # apply function to df df = Label_Encoder(df) df.head().T # normalize the numerical columns using MinMaxScaler scaler = MinMaxScaler() feature = [ "checking_status", "purpose", "savings_status", "credit_history_new", "housing_new", "duration_group", "age_group", "gender", ] df[feature] = scaler.fit_transform(df[feature]) # balance the dataset using SMOTE smote = SMOTE() X = df.drop("class", axis=1) y = df["class"] X, y = smote.fit_resample(X, y) df = pd.concat([X, y], axis=1) # split the dataset into train and test sets X_train, X_test, y_train, y_test = train_test_split( df.drop("class", axis=1), df["class"], test_size=0.3, random_state=42 ) X_train.shape, y_train.shape def feature_importance_graph(model): # Create plot plt.figure(figsize=(8, 4)) # Create 
plot title plt.title("Feature Importance") # Get feature importances importance = model.feature_importances_ # Sort feature importances in descending order indices = np.argsort(importance) # Rearrange feature names so they match the sorted feature importances names = [X_train.columns[i] for i in indices] # Add bars plt.barh(range(X_train.shape[1]), importance[indices]) # Add feature names as y-axis labels, rotated vertically plt.yticks(range(X_train.shape[1]), names, fontsize=10, rotation=0) # Show plot plt.show() ros = RandomOverSampler(random_state=0) X_train, y_train = ros.fit_resample(X_train, y_train) # ## XGBoost # use GridSearchCV to find the best hyperparameters for XGBoost param_grid = { "max_depth": [3, 5, 7, 9], "learning_rate": [0.1, 0.01, 0.001], "n_estimators": [100, 500, 1000], } xgb = XGBClassifier(random_state=42) grid_search = GridSearchCV( xgb, param_grid=param_grid, scoring="accuracy", cv=5, n_jobs=-1, verbose=0 ) grid_search.fit(X_train, y_train) print("Best parameters: ", grid_search.best_params_) # train the XGBoost model xgb = XGBClassifier( max_depth=grid_search.best_params_["max_depth"], learning_rate=grid_search.best_params_["learning_rate"], n_estimators=grid_search.best_params_["n_estimators"], random_state=42, ) xgb.fit(X_train, y_train) # evaluate the performance of the model y_pred = xgb.predict(X_test) print(classification_report(y_test, y_pred)) xgb_accuracy = accuracy_score(y_test, y_pred) xgb_f1 = f1_score(y_test, y_pred) xgb_recall = recall_score(y_test, y_pred) xgb_precision = precision_score(y_test, y_pred) cf_matrix = confusion_matrix(y_test, y_pred) plt.figure(figsize=(6, 4)) sns.heatmap(cf_matrix, annot=True) feature_importance_graph(xgb) # ## Decision Tree # use GridSearchCV to find the best hyperparameters for Decision Tree param_grid = { "max_depth": [3, 4, 5, 6, 7, 9, 11], "min_samples_split": [2, 3, 4, 5, 6, 7], "criterion": ["entropy", "gini"], } model = DecisionTreeClassifier(random_state=42) grid_search = GridSearchCV( model, param_grid=param_grid, scoring="accuracy", cv=5, n_jobs=-1, verbose=0 ) grid_search.fit(X_train, y_train) print("Best parameters: ", grid_search.best_params_) decision_tree = DecisionTreeClassifier( max_depth=grid_search.best_params_["max_depth"], min_samples_split=grid_search.best_params_["min_samples_split"], criterion=grid_search.best_params_["criterion"], random_state=42, ) decision_tree.fit(X_train, y_train) # evaluate the performance of the model y_pred = decision_tree.predict(X_test) print(classification_report(y_test, y_pred)) tree_accuracy = accuracy_score(y_test, y_pred) tree_f1 = f1_score(y_test, y_pred) tree_recall = recall_score(y_test, y_pred) tree_precision = precision_score(y_test, y_pred) cf_matrix = confusion_matrix(y_test, y_pred) plt.figure(figsize=(6, 4)) sns.heatmap(cf_matrix, annot=True) feature_importance_graph(decision_tree) # ## Random Forest # use GridSearchCV to find the best hyperparameters for RandomForest param_grid = { "max_depth": [3, 4, 5, 6, 7, 9, 11], "min_samples_split": [2, 3, 4, 5, 6, 7], "criterion": ["entropy", "gini"], } model = RandomForestClassifier(random_state=42) grid_search = GridSearchCV( model, param_grid=param_grid, scoring="accuracy", cv=5, n_jobs=-1, verbose=0 ) grid_search.fit(X_train, y_train) print("Best parameters: ", grid_search.best_params_) random_forest = RandomForestClassifier( max_depth=grid_search.best_params_["max_depth"], min_samples_split=grid_search.best_params_["min_samples_split"], criterion=grid_search.best_params_["criterion"], random_state=42, ) 
random_forest.fit(X_train, y_train) # evaluate the performance of the model y_pred = random_forest.predict(X_test) print(classification_report(y_test, y_pred)) random_accuracy = accuracy_score(y_test, y_pred) random_f1 = f1_score(y_test, y_pred) random_recall = recall_score(y_test, y_pred) random_precision = precision_score(y_test, y_pred) cf_matrix = confusion_matrix(y_test, y_pred) plt.figure(figsize=(6, 4)) sns.heatmap(cf_matrix, annot=True) feature_importance_graph(random_forest) # ## Extra Trees # use GridSearchCV to find the best hyperparameters for Extra Trees param_grid = { "max_depth": [3, 4, 5, 6, 7, 9, 11], "min_samples_split": [2, 3, 4, 5, 6, 7], "criterion": ["entropy", "gini"], } #'n_estimators': [100, 500, 1000] model = ExtraTreesClassifier(random_state=42) grid_search = GridSearchCV( model, param_grid=param_grid, scoring="accuracy", cv=5, n_jobs=-1, verbose=0 ) grid_search.fit(X_train, y_train) print("Best parameters: ", grid_search.best_params_) extra_trees = ExtraTreesClassifier( max_depth=grid_search.best_params_["max_depth"], min_samples_split=grid_search.best_params_["min_samples_split"], criterion=grid_search.best_params_["criterion"], random_state=42, ) extra_trees.fit(X_train, y_train) # evaluate the performance of the model y_pred = extra_trees.predict(X_test) print(classification_report(y_test, y_pred)) extra_accuracy = accuracy_score(y_test, y_pred) extra_f1 = f1_score(y_test, y_pred) extra_recall = recall_score(y_test, y_pred) extra_precision = precision_score(y_test, y_pred) cf_matrix = confusion_matrix(y_test, y_pred) plt.figure(figsize=(6, 4)) sns.heatmap(cf_matrix, annot=True) feature_importance_graph(extra_trees) # ## Naive Bayes naive_bayes = GaussianNB() naive_bayes.fit(X_train, y_train) # evaluate the performance of the model y_pred = naive_bayes.predict(X_test) print(classification_report(y_test, y_pred)) nb_accuracy = accuracy_score(y_test, y_pred) nb_f1 = f1_score(y_test, y_pred) nb_recall = recall_score(y_test, y_pred) nb_precision = precision_score(y_test, y_pred) cf_matrix = confusion_matrix(y_test, y_pred) plt.figure(figsize=(6, 4)) sns.heatmap(cf_matrix, annot=True) # ## KNN # use GridSearchCV to find the best hyperparameters for KNN param_grid = { "n_neighbors": [1, 2, 3, 4, 5, 7, 9], "p": [2, 3, 4, 5, 6, 7], "metric": ["minkowski", "euclidean", "manhattan"], } model = KNeighborsClassifier() grid_search = GridSearchCV( model, param_grid=param_grid, scoring="accuracy", cv=5, n_jobs=-1, verbose=0 ) grid_search.fit(X_train, y_train) print("Best parameters: ", grid_search.best_params_) knn = KNeighborsClassifier( n_neighbors=grid_search.best_params_["n_neighbors"], p=grid_search.best_params_["p"], metric=grid_search.best_params_["metric"], ) knn.fit(X_train, y_train) """ k_list = list(range(1,10)) k_values = dict(n_neighbors = k_list) grid = GridSearchCV(knn, k_values, cv = 2, scoring = 'accuracy', n_jobs = -1) grid.fit(X_train, y_train) """ # evaluate the performance of the model y_pred = knn.predict(X_test) print(classification_report(y_test, y_pred)) knn_accuracy = accuracy_score(y_test, y_pred) knn_f1 = f1_score(y_test, y_pred) knn_recall = recall_score(y_test, y_pred) knn_precision = precision_score(y_test, y_pred) cf_matrix = confusion_matrix(y_test, y_pred) plt.figure(figsize=(6, 4)) sns.heatmap(cf_matrix, annot=True) # ## Logistic Regression # #### It did relatively well at predicting both outcomes, but it's still not a good model, we have better ones. 
log_reg = LogisticRegression(random_state=42, max_iter=1000) log_reg.fit(X_train, y_train) # evaluate the performance of the model y_pred = log_reg.predict(X_test) print(classification_report(y_test, y_pred)) logistic_accuracy = accuracy_score(y_test, y_pred) logistic_f1 = f1_score(y_test, y_pred) logistic_recall = recall_score(y_test, y_pred) logistic_precision = precision_score(y_test, y_pred) cf_matrix = confusion_matrix(y_test, y_pred) plt.figure(figsize=(6, 4)) sns.heatmap(cf_matrix, annot=True) # Summary xgb_dict = { "Model": "XGB", "Accuracy": round(float(xgb_accuracy), 2), "F1": round(float(xgb_f1), 2), "Recall": round(float(xgb_recall), 2), "Precision": round(float(xgb_precision), 2), } tree_dict = { "Model": "Decision Tree", "Accuracy": round(float(tree_accuracy), 2), "F1": round(float(tree_f1), 2), "Recall": round(float(tree_recall), 2), "Precision": round(float(tree_precision), 2), } random_dict = { "Model": "Random Forest", "Accuracy": round(float(random_accuracy), 2), "F1": round(float(random_f1), 2), "Recall": round(float(random_recall), 2), "Precision": round(float(random_precision), 2), } extra_dict = { "Model": "Extra Tree", "Accuracy": round(float(extra_accuracy), 2), "F1": round(float(extra_f1), 2), "Recall": round(float(extra_recall), 2), "Precision": round(float(extra_precision), 2), } nb_dict = { "Model": "Naive Bayes", "Accuracy": round(float(nb_accuracy), 2), "F1": round(float(nb_f1), 2), "Recall": round(float(nb_recall), 2), "Precision": round(float(nb_precision), 2), } knn_dict = { "Model": "KNN", "Accuracy": round(float(knn_accuracy), 2), "F1": round(float(knn_f1), 2), "Recall": round(float(knn_recall), 2), "Precision": round(float(knn_precision), 2), } logistic_dict = { "Model": "Logistic Regression", "Accuracy": round(float(logistic_accuracy), 2), "F1": round(float(logistic_f1), 2), "Recall": round(float(logistic_recall), 2), "Precision": round(float(logistic_precision), 2), } result = pd.DataFrame( { "XGB": pd.Series(xgb_dict), "Decision Tree": pd.Series(tree_dict), "Random Forest": pd.Series(random_dict), "Extra Trees": pd.Series(extra_dict), "Naive Bayes": pd.Series(nb_dict), "KNN": pd.Series(knn_dict), "Logistic Regression": pd.Series(logistic_dict), } ) result
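# The `result` table above is easier to compare visually. What follows is an optional sketch that was not part of the original notebook: it transposes `result`, casts the metric columns to float, and draws a grouped bar chart, assuming the `result` DataFrame and the pandas/matplotlib imports defined earlier are still available.
metrics = ["Accuracy", "F1", "Recall", "Precision"]
# transpose so each row is one model, then make the metric columns numeric
comparison = result.T.set_index("Model")[metrics].astype(float)
# sort by accuracy so the strongest model is listed first
comparison = comparison.sort_values("Accuracy", ascending=False)
# grouped bar chart of the four metrics for every model
ax = comparison.plot(kind="bar", figsize=(10, 5), rot=45)
ax.set_ylabel("Score")
ax.set_title("Model Comparison")
plt.tight_layout()
plt.show()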
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/976/129976621.ipynb
credit-risk-customers
ppb00x
[{"Id": 129976621, "ScriptId": 38629395, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10804080, "CreationDate": "05/17/2023 21:33:07", "VersionNumber": 3.0, "Title": "PAD-2: Credit Risk Assessment-2 (85%)", "EvaluationDate": "05/17/2023", "IsChange": false, "TotalLines": 691.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 691.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186419025, "KernelVersionId": 129976621, "SourceDatasetVersionId": 5380797}]
[{"Id": 5380797, "DatasetId": 3119852, "DatasourceVersionId": 5454434, "CreatorUserId": 10465532, "LicenseName": "Other (specified in description)", "CreationDate": "04/12/2023 08:28:28", "VersionNumber": 3.0, "Title": "credit_risk_customers", "Slug": "credit-risk-customers", "Subtitle": "Predict if customers are risky or not for credit", "Description": "This dataset consists of 20 features of the customers.\nIt could be used to predict if the customer could be given credit.\nMany features require data cleaning.\nThis is a great dataset for practicing data cleaning and feature engineering and building a binary classification model.", "VersionNotes": "Data Update 2023-04-12", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3119852, "CreatorUserId": 10465532, "OwnerUserId": 10465532.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5380797.0, "CurrentDatasourceVersionId": 5454434.0, "ForumId": 3183328, "Type": 2, "CreationDate": "04/12/2023 05:17:40", "LastActivityDate": "04/12/2023", "TotalViews": 33445, "TotalDownloads": 4955, "TotalVotes": 92, "TotalKernels": 24}]
[{"Id": 10465532, "UserName": "ppb00x", "DisplayName": "srihari p", "RegisterDate": "05/07/2022", "PerformanceTier": 2}]
false
1
6,985
0
7,064
6,985
129998324
# **Introduction** # The aim of this study is to understand how dispositional affect and social preference relate to actual trust and altruistic behavior. # In this study, we collected data on: # * Incentivized decisions in the trust game and dictator game, which serve as proxies for trust level and altruism, respectively. # * People's perceived social distance to the agents depicted in different betrayal scenarios. # * People's emotional response and social cognition towards different betrayal scenarios. # * People's perceived social distance to the anonymous counterparts they encountered during the economic games. # More details can be found at this OSF link https://osf.io/27t6b # This notebook provides a quick exploration and overview of what the dataset looks like. # ![](https://i.kym-cdn.com/entries/icons/original/000/014/737/07_trust-fall.gif) # import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns # Reset rcParams to default values mpl.rcParams.update(mpl.rcParamsDefault) # globally setting seaborn sns.set(style="ticks", palette="muted", font_scale=1.2, context="notebook") mpl.rcParams["lines.linewidth"] = 2 mpl.rcParams["lines.markersize"] = 6 mpl.rcParams["font.size"] = 10 dirname = "/kaggle/input/economic-games-ba/" sl_all_avg = pd.read_csv(dirname + "sl_all_avg.csv") sl_both75_avg = pd.read_csv(dirname + "sl_both75_avg.csv") sl_both75_unfold = pd.read_csv(dirname + "sl_both75_unfold.csv") sl_dg75_avg = pd.read_csv(dirname + "sl_dg75_avg.csv") sl_dg75_unfold = pd.read_csv(dirname + "sl_dg75_unfold.csv") sl_tg75_avg = pd.read_csv(dirname + "sl_tg75_avg.csv") sl_tg75_unfold = pd.read_csv(dirname + "sl_tg75_unfold.csv") # sample from different regions areas = list(sl_all_avg["Nationality"]) sns.countplot(x=areas) plt.xticks(rotation=80) plt.show() female_tg_mean = sl_all_avg[sl_all_avg["Gender"] == "Female"]["TG_trustor"].mean() male_tg_mean = sl_all_avg[sl_all_avg["Gender"] == "Male"]["TG_trustor"].mean() print(female_tg_mean) print(male_tg_mean) # Create subplots fig, axs = plt.subplots(2, 2, figsize=(16, 16)) # decisions distributions in trust game by gender sns.histplot( data=sl_all_avg, x="TG_trustor", hue="Gender", element="step", kde=True, ax=axs[0, 0], ) axs[0, 0].set_title("Trust Distribution by Gender") # decisions distributions in dictator game by gender sns.histplot( data=sl_all_avg, x="DG_dictator", hue="Gender", element="step", kde=True, ax=axs[0, 1], ) axs[0, 1].set_title("Dictator Distribution by Gender") # violin plot for trust sns.violinplot(data=sl_all_avg, y="TG_trustor", x="Gender", ax=axs[1, 0]) axs[1, 0].set_title("Trust") # violin plot for altruism sns.violinplot(data=sl_all_avg, y="DG_dictator", x="Gender", ax=axs[1, 1]) axs[1, 1].set_title("Altruism") plt.tight_layout() plt.show() # pivot table sl_all_avg.pivot_table( values=["TG_trustor", "DG_dictator"], index=["Gender", "educationClustered", "incomeClustered"], aggfunc=[np.min, np.max, np.mean, np.median], )
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/998/129998324.ipynb
null
null
[{"Id": 129998324, "ScriptId": 38662336, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15152859, "CreationDate": "05/18/2023 03:13:37", "VersionNumber": 6.0, "Title": "EDA on decisions of strategic uncertainty", "EvaluationDate": "05/18/2023", "IsChange": true, "TotalLines": 80.0, "LinesInsertedFromPrevious": 5.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 75.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,065
0
1,065
1,065
129998206
<jupyter_start><jupyter_text>Mobile Games: A/B Testing This dataset is from a DataCamp project: https://www.datacamp.com/projects/184. The data is about an A/B test with a mobile game, Cookie Cats. Kaggle dataset identifier: mobile-games-ab-testing <jupyter_script># # 🧪 The Great Divide: A/B Testing and Hypothesis Testing # In this notebook, we'll be going over a comprehensive review of **A/B Testing** and the different types of statistical tests used in A/B testing. This may require you to have some prerequisite knowledge of inferential statistics. Fortunately, as a former statistics TA, I wrote an entire series on an introduction to inferential statistics on my blog. Check it out at [benchenblog.com](http://benchenblog.com) if you're interested. Anyways, let's begin shall we! # ## Modules/Packages # There's not a whole lot of modules that we'll be using in this notebook, since most statistical tests are packaged in either `scipy` or `statsmodels`. Other than the two, we'll just import the usual ones like `pandas`, `numpy` and `matplotlib`. Personally, I prefer to use `seaborn` for data visualization, so let's import it as well. # import modules import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set_style("darkgrid") import scipy.stats as stats from statsmodels.stats.proportion import proportions_ztest # ## Dataset # In this notebook, we'll be dealing with a rather simple dataset from the mobile game *Cookie Cats*. It only contains five variables in total, so let's take a look at them one by one. # * `userid` - the `userid` is just the user id. Each unique id represents a unique player. Nothing special here. # * `version` - `version` takes on two values: `gate_30` and `gate_40`. When users are playing this game, they encounter gates that force them to wait a while before they can progress. `gate_30` and `gate_40` just mean that the gate is placed at level 30 and level 40, respectively. `version` is also our group of interest. We want to perform A/B testing to determine whether placing the gate at these two different levels would impact player retention and game rounds. # * `sum_gamerounds` - the number of game rounds played by the user in the first week of installation # * `retention_1` (bool) - whether the user came back to play the game 1 day after installation # * `retention_7` (bool) - whether the user came back to play the game 7 days after installation # read data data = pd.read_csv("/kaggle/input/mobile-games-ab-testing/cookie_cats.csv") data.head() # We also have quite a lot of samples in our dataset. In total, we have 90189 samples, which should be more than enough for us to perform our A/B testing. data.shape # ## Exploratory Data Analysis (EDA) # Okay, now let's take a deeper dive into our dataset by constructing some data visualizations. We'll start with the distribution for `version`, our group of interest. # ### version # plot count plot sns.countplot(data=data, x="version") # We can see that the counts for the values are roughly the same, both having more than 40,000 counts. The count of `gate_40` does slightly exceed that of `gate_30`. Next, let's try to visualize `sum_gamerounds`. Since this variable is numerical, we'll plot it using a histogram. # ### sum_gamerounds # plot histogram sns.histplot(data=data, x="sum_gamerounds", bins=50) # Woah! What happened here? Although it seems like there are no values to the right of the histogram, there actually are. 
It's just that the count is way too small compared to the first bin, where most of the values lie. Let's check the boxplot instead. # plot boxplot sns.boxplot(data=data, x="sum_gamerounds") # Ah, now we can see the problem. Apparently, there is one extreme outlier that played nearly 50,000 game rounds within 1 week of installation. That's insane! This person must really love this game! If not, it's probably due to a bug or data error. For the sake of our test, we'll have to remove this abnormal sample. Next, let's take a look at `retention_1`. # ### retention_1 # plot countplot sns.countplot(data=data, x="retention_1") # We can see that there are more `False` values than `True`. This means that most people (more than 50%) never played the game again after 1 day of installation. Perhaps this is something that the developers can think about if they want to retain more loyal players. # ### retention_7 # plot countplot sns.countplot(data=data, x="retention_7") # As expected, the number of players that continued to play this game after 1 week of installation drops even lower. At the end of the day, this game is likely designed for some immediate satisfaction, and not for long-term dedication like many role-playing games are designed to do. # ### Remove Outlier # Okay, now as mentioned before, let's remove that one single outlier. # remove outlier data = data.drop(data[data.sum_gamerounds > 40000].index) # ## Bivariate EDA # Now that we have removed that outlier, let's visualize our data again. This time, we'll be looking at how the three variables `sum_gamerounds`, `retention_1` and `retention_7` distribute among the two `version` groups. # ### sum_gamerounds # With that extreme outlier out of the way, we finally get to see how `sum_gamerounds` is distributed. The distribution of `sum_gamerounds` seems to be roughly the same for the two versions. Both seem to have a right-skewed distribution. Given this simple visualization, we can already have an initial guess that the different versions (`gate_30` and `gate_40`) will not have a huge impact on the values of `sum_gamerounds`. Of course, this is only just an initial guess. We'll need to perform statistical tests to back up any claim. # set figure size sns.set(rc={"figure.figsize": (15, 5)}) # plot boxplot sns.boxplot(data=data, x="sum_gamerounds", y="version") # The boxplots above may still be unclear due to their skewness. So let's take a look at their specific statistics to see if we can extract some more insights. # obtain descriptive statistics for sum_gamerounds data.sum_gamerounds.describe() # Let's also zoom into the histogram for when `sum_gamerounds` (the x-axis) is less than 200. Setting the bins as the maximum value of `sum_gamerounds` allows us to easily identify that most players only played once in the first week after installation. There was also a surprising number of players who didn't play the game at all! Perhaps the game's developers need to investigate this issue. # set figure size sns.set(rc={"figure.figsize": (7, 5)}) # plot histogram sns.histplot(data["sum_gamerounds"], bins=2961) # limit x-axis to range (0, 200) plt.xlim(0, 200) # ### retention_1 # Our next variable in line is `retention_1`. Since this variable is a categorical variable, we can easily compare its distribution between the two versions using a count plot. 
# plot count plot sns.countplot(data=data, x="retention_1", hue="version") # Again, it appears that the proportion of players who retained after one day of installation is approximately the same between the two versions. Remember that this dataset contains more samples for version `gate_40`, and so if the two proportions were equal, both orange bars should exceed the blue bars by a little bit. We'll also have to perform a hypothesis test to validate this claim. # ### retention_7 # What was mentioned about `retention_1` would also apply to `retention_7`. Let's see if we can identify any differences here. # plot count plot sns.countplot(data=data, x="retention_7", hue="version") # Do you see the difference between the count plot of `retention_1` and `retention_7`? In the count plot of `retention_7`, the orange count bar (`gate_40`) is higher than the blue count bar (`gate_30`) for `False` values, but lower for `True` values. This is interesting because we just said that, if the two retention proportions were the same, then both orange bars should be slightly higher than the blue bars. We might be able to identify a difference here. Of course, we can only conclude after we perform the hypothesis test. # ## Hypothesis Test # Okay, now that we have a pretty good general idea of our data, we can finally perform our A/B testing! But this is actually the trickiest part. There are many different statistical tests out there to choose from. Which one should we use? Let's not jump too far ahead yet. We'll first separate our data into the control and treatment groups so they can be fed into our tests. # separate control and treatment group control = data[data["version"] == "gate_30"] treatment = data[data["version"] == "gate_40"] # ## sum_gamerounds # The general rule of thumb for determining which statistical test to use depends on the variable type as well as the different assumptions required for different tests. We'll start with `sum_gamerounds`, a numerical variable. # distributions for sum_gamerounds gamerounds_control = control["sum_gamerounds"] gamerounds_treatment = treatment["sum_gamerounds"] # ### Two Sample t-test # We have two samples here, a control and a treatment. And if you recall from your statistics 101 classes, then you should know that we can perform a **two sample t-test** to test whether the unknown population means of two groups are equal or not. This is certainly an option, but we must also remember the conditions/assumptions required for the two sample t-test. # * Data values must be independent # * Data in each group follow a normal distribution # * The variances for the two groups are equal # We know that the data values are independent because they were collected from different users, but are the two distributions normal? From our previous visualization, we already have an idea that the distribution is likely skewed. To verify this, we have to perform other statistical tests to see if the distribution is normal or not. The same applies for assumption 3 (equal variances). We would also have to perform other statistical tests to conclude that they have equal variance. # ### Welch's t-test # In the case where the first two conditions are satisfied, but not the third (unequal variances), we may have to opt for a **Welch's t-test**. Some even argue that we should always perform the Welch's t-test over the two sample t-test, because it's very unlikely that the variances of two groups will ever be exactly the same. 
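# Although the final decision below ends up relying on a non-parametric test, here is a minimal sketch (not part of the original analysis) of how the two parametric options just described could be run with scipy, reusing the `gamerounds_control` and `gamerounds_treatment` series defined above; the only difference between the two calls is the `equal_var` flag, which switches `ttest_ind` from the standard two sample t-test to Welch's t-test.
# standard two sample t-test (assumes equal variances)
t_stat_pooled, t_p_pooled = stats.ttest_ind(gamerounds_control, gamerounds_treatment, equal_var=True)
# Welch's t-test (does not assume equal variances)
t_stat_welch, t_p_welch = stats.ttest_ind(gamerounds_control, gamerounds_treatment, equal_var=False)
print("Two sample t-test p-value:", t_p_pooled)
print("Welch's t-test p-value:", t_p_welch)
# Keep in mind that the normality check in the next section fails, so these results are shown for illustration only.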
# ## Check for Normality # Anyways, let's check whether our second condition (normality) is fulfilled or not. Again, we are left with many different options to check for normality. Here, I'll introduce three of the most commonly used methods: **Shapiro-Wilk Test**, **Kolmogorov–Smirnov Test**, and the **Q-Q Plot**. Let's go over them one by one. # ### Shapiro-Wilk Test # The implementation of the **Shapiro-Wilk Test** is quite simple. We can directly import it from the `scipy.stats` package and we just feed the distribution into the function. The null hypothesis is that the data follows a normal distribution. Using a 0.05 level of significance, we can see that, for both the control and treatment distribution, we rejected our null hypothesis and concluded that they do not follow a normal distribution. # compute test statistics and p-value for our two distributions to check for normality control_shapiro_stat, control_shapiro_p = stats.shapiro(gamerounds_control) treatment_shapiro_stat, treatment_shapiro_p = stats.shapiro(gamerounds_treatment) # control group results print("Shapiro-Wilk test statistic for control group: ", control_shapiro_stat) print("P-value for control group: ", control_shapiro_p) if control_shapiro_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (control group sample follows a normal distribution)" ) else: print( "There is enough evidence to reject the null hypothesis (control group sample does not follow a normal distribution)" ) # treatment group results print("\nShapiro-Wilk test statistic for treatment group: ", treatment_shapiro_stat) print("P-value for treatment group: ", treatment_shapiro_p) if treatment_shapiro_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (treatment group sample follows a normal distribution)" ) else: print( "There is enough evidence to reject the null hypothesis (treatment group sample does not follow a normal distribution)" ) # ### Kolmogorov–Smirnov test # The exact same code can be applied to the **Kolmogorov–Smirnov Test**, except we change `stats.shapiro` to `stats.kstest`. The KS test also indicates that both distributions are not normal. (Note that `stats.kstest(x, "norm")` compares the data against a standard normal distribution with mean 0 and standard deviation 1; strictly speaking, the data should be standardized first for a fairer comparison.) # compute test statistics and p-value for our two distributions to check for normality control_kstest_stat, control_kstest_p = stats.kstest(gamerounds_control, "norm") treatment_kstest_stat, treatment_kstest_p = stats.kstest(gamerounds_treatment, "norm") # control group results print("KS test statistic for control group: ", control_kstest_stat) print("P-value for control group: ", control_kstest_p) if control_kstest_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (control group sample follows a normal distribution)" ) else: print( "There is enough evidence to reject the null hypothesis (control group sample does not follow a normal distribution)" ) # treatment group results print("\nKS test statistic for treatment group: ", treatment_kstest_stat) print("P-value for treatment group: ", treatment_kstest_p) if treatment_kstest_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (treatment group sample follows a normal distribution)" ) else: print( "There is enough evidence to reject the null hypothesis (treatment group sample does not follow a normal distribution)" ) # ### Q-Q Plot # Our last method, plotting the **Q-Q Plot**, serves only as a visual check for normality. In essence, if a distribution is normal, its Q-Q plot should display points falling along a straight line. 
However, we can see in the two Q-Q plots below that they are not straight at all. Instead, the upward curve resembles a right-skewed distribution, which matches our initial data visualization. sns.set(rc={"figure.figsize": (10, 5)}) fig, ax = plt.subplots(1, 2) # Q-Q Plot stats.probplot(gamerounds_control, plot=ax[0]) stats.probplot(gamerounds_treatment, plot=ax[1]) ax[0].set_title("Control Group Q-Q plot") ax[1].set_title("Treatment Group Q-Q plot") plt.show() # Okay, so all three tests say that the two distributions are not normal. What now? If the normality condition is not satisfied, what statistical test should we perform? We'd have to resort to non-parametric tests, where no assumption about the distribution is required. Here, we can use the **Mann-Whitney U Test** to compare the two medians (rather than the means); the median is a better measure of central tendency than the mean when distributions are skewed. # ### Mann-Whitney U Test # The implementation of the **Mann-Whitney U Test** is also very simple in Python. The null hypothesis for this test is that the two samples may come from the same distribution, which means that there should also be no significant difference in central tendency. # perform the Mann-Whitney U test mw_stat, mw_p = stats.mannwhitneyu(gamerounds_control, gamerounds_treatment) # print the results print("Mann-Whitney U statistic:", mw_stat) print("P-value:", mw_p) if mw_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (The two samples may come from the same distribution)" ) else: print( "There is enough evidence to reject the null hypothesis (The two samples may come from different distributions)" ) # As we can see from the result of the Mann-Whitney U Test, there is not enough evidence to reject the null hypothesis. We can thus conclude that the two samples may come from the same distribution and that the version type plays no role in affecting the values of sum_gamerounds. However, we must also acknowledge the fact that the p-value is 0.0508, just slightly above our level of significance of 0.05. This means that the result would be significant if we modified our level of significance to be just slightly higher. # ## Retention 1 # Okay, so that was the result for the variable `sum_gamerounds`. At a 0.05 level of significance, we concluded that there is no difference in the `sum_gamerounds` of `gate_30` and `gate_40`. Let's move on to our next variable, `retention_1`. Unlike `sum_gamerounds`, `retention_1` is a categorical variable (a boolean datatype, to be more specific). So instead of testing for a mean or median, we would be testing the difference in proportions. There are generally two test options for proportions: the **Two-sample Z Test for Proportions** and the **Chi-square Test of Independence**. # ### Two-sample Z Test for Proportions # The rule of thumb in determining which test to perform is to look at the sample size. The **Two-sample Z Test for Proportions** assumes that the sample sizes are large enough for the normal approximation to the sampling distribution. In our dataset, we have plenty of samples, so there should be no problem in conducting this test. To conduct the **Two-sample Z Test for Proportions**, we take the number of successes and the total number of observations for each group, and feed them into the `proportions_ztest` function (from `statsmodels.stats.proportion`). 
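# Before applying `proportions_ztest` in the next cell, here is a small optional sketch (not in the original notebook) of what the pooled two-proportion z statistic computes under the hood; it reuses the `control` and `treatment` frames defined earlier and should closely match the statsmodels result.
# manual pooled two-proportion z statistic for retention_1 (for intuition only)
x1, n1 = control["retention_1"].sum(), len(control)
x2, n2 = treatment["retention_1"].sum(), len(treatment)
p1, p2 = x1 / n1, x2 / n2  # sample proportions in each group
p_pool = (x1 + x2) / (n1 + n2)  # pooled proportion under the null hypothesis
se = np.sqrt(p_pool * (1 - p_pool) * (1 / n1 + 1 / n2))  # standard error under the null
z_manual = (p1 - p2) / se
p_manual = 2 * (1 - stats.norm.cdf(abs(z_manual)))  # two-sided p-value
print("manual z:", z_manual, "manual p-value:", p_manual)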
# number of True values for retention_1 in control and treatment group retention_1_successes = np.array( [sum(control["retention_1"]), sum(treatment["retention_1"])] ) # total number of observations in control and treatment group retention_1_nobs = np.array( [len(control["retention_1"]), len(treatment["retention_1"])] ) retention_1_successes, retention_1_nobs # perform two-sample Z test for proportions retention_1_stat, retention_1_p = proportions_ztest( count=retention_1_successes, nobs=retention_1_nobs ) # print the results print("retention_1 Z test statistics two sample proportions:", retention_1_stat) print("retention_1 p-value:", retention_1_p) if retention_1_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (The two proportions may be the same)" ) else: print( "There is enough evidence to reject the null hypothesis (The two proportions may be different)" ) # The test results indicate that, at a 0.05 level of significance, we do not reject the null hypothesis, and that there is no significant difference between the two proportions. This is also consistent with our initial guess during the data visualization stage. # ### Chi-square Test of Independence # The **Chi-square Test of Independence**, the other option, tests for independence between two categorical variables. This is a non-parametric test, which means it doesn't make any assumptions about the underlying distribution of the data. So in cases where the sample size is small, it may be more appropriate to use the chi-square test instead of the two-sample Z test for proportions. But do note that the chi-square test also has assumptions of its own. In particular: # - Both variables are categorical # - Observations are independent # - Observations are mutually exclusive # - Expected values of cells should be greater than five # For those who are not familiar with the chi-square test, this may come across as confusing. But the idea behind the chi-square test is actually really simple and I encourage you to read about it. There are plenty of resources online, so I won't go into the deep details behind it. For now, just know that all four of these assumptions are satisfied and that we can indeed perform this test. # The implementation is simple. We construct the observed contingency table for our two variables and then feed it into the `chi2_contingency` function from `scipy.stats`. The function will return the test statistic, p-value, degrees of freedom and the expected contingency table. 
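# As an optional aside (not in the original notebook), the expected counts returned by `chi2_contingency` can be reproduced by hand from the row and column totals, which is also a quick way to verify the "expected values greater than five" assumption listed above. The 2x2 table used here contains placeholder numbers for illustration, not values from the data.
def expected_counts(observed):
    # expected count of each cell = row_total * column_total / grand_total
    observed = np.asarray(observed, dtype=float)
    row_totals = observed.sum(axis=1, keepdims=True)
    col_totals = observed.sum(axis=0, keepdims=True)
    return row_totals * col_totals / observed.sum()
toy_observed = np.array([[20, 30], [25, 25]])  # made-up counts, for illustration only
print(expected_counts(toy_observed))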
# construct observed contingency table retention_1_observed = np.array( [ [ sum(control["retention_1"]), len(control["retention_1"]) - sum(control["retention_1"]), ], [ sum(treatment["retention_1"]), len(treatment["retention_1"]) - sum(treatment["retention_1"]), ], ] ) retention_1_observed # return test statistics, p-value, df and expected contingency table ( retention_1_chi2_stat, retention_1_chi2_p, retention_1_dof, retention_1_expected, ) = stats.chi2_contingency(retention_1_observed) # print the results print("Chi-square test statistics for retention_1: ", retention_1_chi2_stat) print("P-value for retention_1: ", retention_1_chi2_p) print("Degree of freedom: ", retention_1_dof) print(retention_1_expected) if retention_1_chi2_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (The two proportions may be the same)" ) else: print( "There is enough evidence to reject the null hypothesis (The two proportions may be different)" ) # The result of the chi-square test is consistent with the two-sample Z test for proportions. At a 0.05 level of significance, there is not enough evidence to reject the null hypothesis, so we can conclude that there are no relationship between `version` and `retention_1` (they are independent). # ## Retention 7 # Phew! Now we're finally down to our final variable, `retention_7`. This variable is essentially the same thing as `retention_1`. But remember that during the data visualization stage, we said that there could potentially be a significant difference in `retention_7` proportion between the two versions. Let's do the same two-sample Z test for proportions and chi-square test to check if our hypothesis is correct. # ### Two-sample Z Test for Proportions retention_7_successes = np.array( [sum(control["retention_7"]), sum(treatment["retention_7"])] ) retention_7_nobs = np.array( [len(control["retention_7"]), len(treatment["retention_7"])] ) retention_7_successes, retention_7_nobs retention_7_stat, retention_7_p = proportions_ztest( count=retention_7_successes, nobs=retention_7_nobs ) print("retention_7 Z test statistics two sample proportions:", retention_7_stat) print("retention_7 p-value:", retention_7_p) if retention_7_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (The two proportions may be the same)" ) else: print( "There is enough evidence to reject the null hypothesis (The two proportions may be different)" ) # ### Chi-squared Test retention_7_observed = np.array( [ [ sum(control["retention_7"]), len(control["retention_7"]) - sum(control["retention_7"]), ], [ sum(treatment["retention_7"]), len(treatment["retention_7"]) - sum(treatment["retention_7"]), ], ] ) retention_7_observed ( retention_7_chi2_stat, retention_7_chi2_p, retention_7_dof, retention_7_expected, ) = stats.chi2_contingency(retention_7_observed) print("Chi-square test statistics for retention_7: ", retention_7_chi2_stat) print("P-value for retention_7: ", retention_7_chi2_p) print("Degree of freedom: ", retention_7_dof) print(retention_7_expected) if retention_7_chi2_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (The two proportions may be the same)" ) else: print( "There is enough evidence to reject the null hypothesis (The two proportions may be different)" )
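# One last sanity check on why the two approaches agree: for a 2x2 table, the square of the pooled two-proportion z statistic equals the Pearson chi-square statistic computed without the continuity correction. A minimal sketch with hypothetical toy counts (not our data):
import numpy as np
from scipy.stats import chi2_contingency
from statsmodels.stats.proportion import proportions_ztest

successes = np.array([180, 200])   # hypothetical "retained" counts per group
nobs = np.array([1000, 1000])      # hypothetical group sizes

z_stat, _ = proportions_ztest(count=successes, nobs=nobs)
table = np.array(
    [[successes[0], nobs[0] - successes[0]], [successes[1], nobs[1] - successes[1]]]
)
chi2_stat, _, _, _ = chi2_contingency(table, correction=False)
print(z_stat**2, chi2_stat)  # the two values agree up to floating-point error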
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/998/129998206.ipynb
mobile-games-ab-testing
yufengsui
[{"Id": 129998206, "ScriptId": 36324354, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6126425, "CreationDate": "05/18/2023 03:11:59", "VersionNumber": 1.0, "Title": "The Great Divide: A/B Testing and Hypothesis Test", "EvaluationDate": "05/18/2023", "IsChange": true, "TotalLines": 352.0, "LinesInsertedFromPrevious": 352.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186450405, "KernelVersionId": 129998206, "SourceDatasetVersionId": 564457}]
[{"Id": 564457, "DatasetId": 272421, "DatasourceVersionId": 581103, "CreatorUserId": 1461568, "LicenseName": "Unknown", "CreationDate": "07/22/2019 08:37:03", "VersionNumber": 1.0, "Title": "Mobile Games: A/B Testing", "Slug": "mobile-games-ab-testing", "Subtitle": "Analyze an A/B test from the popular mobile puzzle game, Cookie Cats.", "Description": "This dataset is from a DataCamp project: https://www.datacamp.com/projects/184.\n\nThe data is about an A/B test with a mobile game, Cookie Cats.", "VersionNotes": "Initial release", "TotalCompressedBytes": 2797485.0, "TotalUncompressedBytes": 511415.0}]
[{"Id": 272421, "CreatorUserId": 1461568, "OwnerUserId": 1461568.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 564457.0, "CurrentDatasourceVersionId": 581103.0, "ForumId": 283777, "Type": 2, "CreationDate": "07/22/2019 08:37:03", "LastActivityDate": "07/22/2019", "TotalViews": 34634, "TotalDownloads": 3307, "TotalVotes": 76, "TotalKernels": 68}]
[{"Id": 1461568, "UserName": "yufengsui", "DisplayName": "Aurelia Sui", "RegisterDate": "12/01/2017", "PerformanceTier": 1}]
# # 🧪 The Great Divide: A/B Testing and Hypothesis Testing # In this notebook, we'll be going over a comprehensive review of **A/B Testing** and the different types of statistical tests used in A/B testing. This may require you to have some prerequisite knowledge of inferential statistics. Fortunately, as a former statistics TA, I wrote an entire series on an introduction to inferential statistics on my blog. Check it out at [benchenblog.com](http://benchenblog.com) if you're interested. Anyways, let's begin, shall we? # ## Modules/Packages # There aren't a whole lot of modules that we'll be using in this notebook, since most statistical tests are packaged in either `scipy` or `statsmodels`. Other than the two, we'll just import the usual ones like `pandas`, `numpy` and `matplotlib`. Personally, I prefer to use `seaborn` for data visualization, so let's import it as well. # import modules import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set_style("darkgrid") import scipy.stats as stats from statsmodels.stats.proportion import proportions_ztest # ## Dataset # In this notebook, we'll be dealing with a rather simple dataset from the mobile game *Cookie Cats*. It only contains five variables in total, so let's take a look at them one by one. # * `userid` - the `userid` is just the user id. Each unique id represents a unique player. Nothing special here. # * `version` - `version` takes on two values: `gate_30` and `gate_40`. When users are playing this game, they encounter gates that force them to wait a while before they can progress. `gate_30` and `gate_40` just mean that the gate is placed at level 30 and level 40, respectively. `version` is also our group of interest. We want to perform A/B testing to determine whether placing the gate at these two different levels would impact player retention and game rounds. # * `sum_gamerounds` - the number of game rounds played by the user in the first week of installation # * `retention_1` (bool) - whether the user came back to play the game 1 day after installation # * `retention_7` (bool) - whether the user came back to play the game 7 days after installation # read data data = pd.read_csv("/kaggle/input/mobile-games-ab-testing/cookie_cats.csv") data.head() # We also have quite a lot of samples in our dataset. In total, we have 90189 samples, which should be more than enough for us to perform our A/B testing. data.shape # ## Exploratory Data Analysis (EDA) # Okay, now let's take a deeper dive into our dataset by constructing some data visualizations. We'll start with the distribution for `version`, our group of interest. # ### version # plot count plot sns.countplot(data=data, x="version") # We can see that the counts for the two values are roughly the same, both having more than 40,000 counts. The count of `gate_40` does slightly exceed that of `gate_30`. Next, let's try to visualize `sum_gamerounds`. Since this variable is numerical, we'll plot it using a histogram. # ### sum_gamerounds # plot histogram sns.histplot(data=data, x="sum_gamerounds", bins=50) # Woah! What happened here? Although it seems like there are no values to the right of the histogram, there actually are. It's just that the count is way too small compared to the first bin, where most values lie. Let's check the boxplot instead. # plot boxplot sns.boxplot(data=data, x="sum_gamerounds") # Ah, now we can see the problem. Apparently, there is one extreme outlier that played nearly 50,000 game rounds within 1 week of installation.
That's insane! This person must really love this game! If not, it's probably due to a bug or data error. For the sake of our test, we'll have to remove this abnormal sample. Next, let's take a look at `retention_1`. # ### retention_1 # plot countplot sns.countplot(data=data, x="retention_1") # We can see that there are more `False` values than `True`. This means that most people (more than 50%) never played the game again after 1 day of installation. Perhaps, this is something that the developers can think about if they want to retain more loyal players. # ### retention_7 # plot countplot sns.countplot(data=data, x="retention_7") # As expected, the number of players that continued to play this game after 1 week of installation drops even lower. At the end of the day, this game is likely designed for some immediate satisfaction, and not for long-term dedication like many role-playing games are designed to do. # ### Remove Outlier # Okay, now as mentioned before, let's remove that one single outlier. # remove outlier data = data.drop(data[data.sum_gamerounds > 40000].index) # ## Bivariate EDA # Now that we have removed that outlier, let's visualize our data again. This time, we'll be looking at how the three variables `sum_gamerounds`, `retention_1` and `retention_7` distribute among the two `version` groups. # ### sum_gamerounds # With that extreme outlier out of the way, we finally get to see how `sum_gamerounds` is distributed. The distribution of `sum_gamerounds` seems to be roughly the same for the two versions. Both seem to have a right-skewed distribution. Given this simple visualization, we can already have an initial guess that the different versions (`gate_30` and `gate_40`) will not have a huge impact on the values of `sum_gamerounds`. Of course, this is only just an initial guess. We'll need to perform statistical tests to back up any claim. # set figure size sns.set(rc={"figure.figsize": (15, 5)}) # plot boxplot sns.boxplot(data=data, x="sum_gamerounds", y="version") # The boxplots above may still be unclear due to their skewness. So let's take a look at their specific statistics to see if we can extract some more insights. # obtain descriptive statistics for sum_gamerounds data.sum_gamerounds.describe() # Let's also zoom into histogram for when `sum_gamerounds` (the x-axis) is less than 200. Setting the bins as the maximum value of `sum_gamerounds` allows us to easily identify that most players only once in the first week after installation. There was also a surprising amount of players who didn't play the game at all! Perhaps, developers of the game need to investigate into this issue. # set figure size sns.set(rc={"figure.figsize": (7, 5)}) # plot histogram sns.histplot(data["sum_gamerounds"], bins=2961) # limit x-axis to range (0, 200) plt.xlim(0, 200) # ### retention_1 # Our next variable in the line is `retention_1`. Since this variable is categorical variable, we can easily compare its distribution between the two version using a count plot. # plot count plot sns.countplot(data=data, x="retention_1", hue="version") # Again, it appears that the proportion of those players who retained after one day of installation is approximately the same among the two versions. Remember that this dataset contains more samples for version `gate_40` and so if the two proprotions were equal, both orange bars should exceed the blue bars by a little bit. We'll also have to perform a hypothesis testing to validate this hypothesis. 
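# To attach rough numbers to the bars above, here is a quick sketch of the day-1 retention rate per version; it assumes the `data` DataFrame loaded earlier in this notebook.
retention_rates = pd.crosstab(data["version"], data["retention_1"], normalize="index")
print(retention_rates)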
# ### retention_7 # What was mentioned about `retention_1` would also apply to `retention_7`. Let's see if we can identify any differences here. # plot count plot sns.countplot(data=data, x="retention_7", hue="version") # Do you see the difference between the count plot of `retention_1` and `retention_7`? In the count plot of `retention_7`, the orange count bar (`gate_40`) is higher than the blue count bar (`gate_30`) for `False` values, but less for `True` values. This is interesting because we just said that, if the two proportion of retention is the same, then both orange bars should be slightly higher than that of the blue bars. We might be able to identify a difference here. Of course, we can only conclude after we perform the hypothesis test. # ## Hypothesis Test # Okay, now that we have a pretty good general idea of our data, we can finally perform our A/B testing! But this is actually the trickiest part. There are many different options of statistical tests out there for you to choose. Which one should we use? Let's not jump too far ahead yet. We'll first seperate our data into the control and treatment group so it can be fed into our tests. # seperate control and treatment group control = data[data["version"] == "gate_30"] treatment = data[data["version"] == "gate_40"] # ## sum_gamerounds # The general rule of thumb for determining which statistical test to use depends on the variable type as well as the different assumptions required for different tests. We'll start with `sum_gamerounds`, a numerical variable. # distributions for sum_gamerounds gamerounds_control = control["sum_gamerounds"] gamerounds_treatment = treatment["sum_gamerounds"] # ### Two Sample t-test # We have two samples here, a control and a treatment. And if you recall from your statistics 101 classes, then you should know that we can perform a **two sample t-test** to test whether the unknown population means of two groups are equal or not. This is certainly an option, but we must also remember the conditions/assumptions required for the two sample t-test. # * Data values must be independent # * Data in each group follow a normal distribution # * The variances for the two groups are equal # We know that the data values are independent because they were collected from different users, but are the two distributions normal? From our previous visualization, we already have an idea that the distribution is likely skewed. To verify this, we have to perform other statistical tests to see if the distribution is normal or not. The same applies for assumption 3 (equal variances). We would also have to perform other statistical tests to conclude that they have equal variance. # ### Welch's t-test # In the case where the first two conditions are satisfied, but not the third (unequal variances), we may have to opt for a **Welch's t-test**. Some even argue that we should always perform the Welch's t-test over the two sample t-test, because it's very unlikely that the variance between two variances will ever be the same. # ## Check for Normality # Anyways, let's check whether our second condition (normality) is fulfilled or not. Again, we are left with many different options to check for normality. Here, I'll introduce three of the most commonly used methods: **Shapiro-Wilk Test**, **Kolmogorov–Smirnov Test**, and the **Q-Q Plot**. Let's go over them one by one. # ### Shapiro-Wilk Test # The implemetation of the **Shapiro-Wilk Test** is quite simple. 
We can directly import it from the `scipy.stats` package and just feed the distribution into the function. The null hypothesis is that the data follows a normal distribution. Using a 0.05 level of significance, we can see that, for both the control and treatment distributions, we rejected our null hypothesis and concluded that they do not follow a normal distribution. # compute test statistics and p-value for our two distributions to check for normality control_shapiro_stat, control_shapiro_p = stats.shapiro(gamerounds_control) treatment_shapiro_stat, treatment_shapiro_p = stats.shapiro(gamerounds_treatment) # control group results print("Shapiro-Wilk test statistic for control group: ", control_shapiro_stat) print("P-value for control group: ", control_shapiro_p) if control_shapiro_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (control group sample follows a normal distribution)" ) else: print( "There is enough evidence to reject the null hypothesis (control group sample does not follow a normal distribution) " ) # treatment group results print("\nShapiro-Wilk test statistic for treatment group: ", treatment_shapiro_stat) print("P-value for treatment group: ", treatment_shapiro_p) if treatment_shapiro_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (treatment group sample follows a normal distribution)" ) else: print( "There is enough evidence to reject the null hypothesis (treatment sample does not follow a normal distribution) " ) # ### Kolmogorov–Smirnov test # The exact same code can be applied to the **Kolmogorov–Smirnov Test**, except we change `stats.shapiro` to `stats.kstest`. Note that passing `"norm"` compares the data against a standard normal distribution (mean 0, standard deviation 1), so in practice you would usually standardize the data or supply fitted parameters first. As run here, the KS test also indicates that both distributions are not normal. # compute test statistics and p-value for our two distributions to check for normality control_kstest_stat, control_kstest_p = stats.kstest(gamerounds_control, "norm") treatment_kstest_stat, treatment_kstest_p = stats.kstest(gamerounds_treatment, "norm") # control group results print("KS test statistic for control group: ", control_kstest_stat) print("P-value for control group: ", control_kstest_p) if control_kstest_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (control group sample follows a normal distribution)" ) else: print( "There is enough evidence to reject the null hypothesis (control group sample does not follow a normal distribution) " ) # treatment group results print("\nKS test statistic for treatment group: ", treatment_kstest_stat) print("P-value for treatment group: ", treatment_kstest_p) if treatment_kstest_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (treatment group sample follows a normal distribution)" ) else: print( "There is enough evidence to reject the null hypothesis (treatment sample does not follow a normal distribution) " ) # ### Q-Q Plot # Our last method, the **Q-Q Plot**, serves only as a visual check for normality. In essence, if a distribution is normal, its Q-Q plot should display points that fall along a straight line. However, as we can see in the two Q-Q plots below, the points do not follow a straight line at all. Instead, the upward curve resembles a right-skewed distribution, which matches our initial data visualization.
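# For intuition before looking at the `probplot` output below, here is a rough, hand-rolled version of a normal Q-Q plot: sorted sample values plotted against quantiles of a fitted normal distribution. scipy's `probplot` uses slightly different plotting positions, so this is only an approximate sketch, and it assumes `gamerounds_control` from the cells above.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

sample = np.sort(np.asarray(gamerounds_control, dtype=float))
n = len(sample)
probs = (np.arange(1, n + 1) - 0.5) / n  # simple plotting positions
theoretical = norm.ppf(probs, loc=sample.mean(), scale=sample.std())
plt.scatter(theoretical, sample, s=2)
plt.xlabel("Theoretical normal quantiles")
plt.ylabel("Observed sum_gamerounds quantiles")
plt.show()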
sns.set(rc={"figure.figsize": (10, 5)}) fig, ax = plt.subplots(1, 2) # Q-Q Plot stats.probplot(gamerounds_control, plot=ax[0]) stats.probplot(gamerounds_treatment, plot=ax[1]) ax[0].set_title("Control Group Q-Q plot") ax[1].set_title("Treatment Group Q-Q plot") plt.show() # Okay, so all three tests say that the two distributions are not normal. What now? If the normality condition is not satisfied, what statistical test should we perform? We'd have to resort to non-parametric tests, where no assumption about the distribution is required. Here, we can use the **Mann-Whitney U Test** to compare two medians (not the mean), which would be a better measure of central tendency than the mean when distributions are skewed. # ### Mann-Whitney U Test # The implementation of the **Mann-Whitney U Test** is also very simple in python. The null hypothesis for this test is that the two samples may come from the same distribution, which means that there should also be no significant difference in central tendency. # perform the Mann-Whitney U test mw_stat, mw_p = stats.mannwhitneyu(gamerounds_control, gamerounds_treatment) # print the results print("Mann-Whitney U statistic:", mw_stat) print("P-value:", mw_p) if mw_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (The two samples may come from the same distribution)" ) else: print( "There is enough evidence to reject the null hypothesis (The two samples may come from different distributions)" ) # As we can see from the result of the Mann-Whitney U Test, there is no significant evidence to reject the null hypothesis. We can thus conclude that the two samples may come from the same distribution and that the version type plays no role in affecting the values of sum_gamerounds. However, we must also acknowledge the fact that the p-value is 0.0508, just slightly above our level of significance of 0.05. This means that the result would be significant if we modified our level of significance to be just slightly higher. # ## Retention 1 # Okay, so that was the result for the variable `sum_gamerounds`. At a 0.05 level of significance, we concluded that there is no difference in the `sum_gamerounds` of `gate_30` and `gate_40`. Let' move on to our next variable, `retention_1`. Unlike `sum_gamerounds`, `retention_1` is a categorical variable (boolean datatype to be more specific). So instead of testing for mean or median, we would be testing the difference in proportion. There are generally two test options to test for proportion: **Two-sample Z Test for Proportions** and the **Chi-square Test of Independence**. # ### Two-sample Z Test for Proportions # The rule of thumb in determining which test to perform is to look at the sample size. The **Two-sample Z Test for Proportions** assumes that the sample sizes are large enough for the normal approximation to the sampling distribution. In our dataset, we have plenty of samples, so there should be no problem in conducting this test. To conduct the **Two-sample Z Test for Proportions**, we have to take the values of the number of successes and the total number of observations, and feed it into the `proportions_ztest` function (from `statsmodel.stats.proportion`). 
# number of True values for retention_1 in control and treatment group retention_1_successes = np.array( [sum(control["retention_1"]), sum(treatment["retention_1"])] ) # total number of observations in control and treatment group retention_1_nobs = np.array( [len(control["retention_1"]), len(treatment["retention_1"])] ) retention_1_successes, retention_1_nobs # perform two-sample Z test for proportions retention_1_stat, retention_1_p = proportions_ztest( count=retention_1_successes, nobs=retention_1_nobs ) # print the results print("retention_1 Z test statistics two sample proportions:", retention_1_stat) print("retention_1 p-value:", retention_1_p) if retention_1_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (The two proportions may be the same)" ) else: print( "There is enough evidence to reject the null hypothesis (The two proportions may be different)" ) # The test results indicate that at 0.05 level of signficance, we do not reject the null hypothesis and that there is no significant difference in the two proportions. This is also consistent with our initial guess during the data visualization stage. # ### Chi-square Test of Independence # The **Chi-square Test of Independence**, the other option, tests for independence between two categorical variables. This is a non-parametric test, which means it doesn't make any assumptions about the underlying distribution of the data. So in case where sample size is small, it may be more appropriate to use the chi-square test instead of the two-sample Z test for proportions. But do note that the chi-square test also has assumptions of its own. In particular: # - Both variables are categorical # - Observations are independent # - Observations are mutually exclusive # - Expected values of cells should be greater than five # For those who are not familiar with the chi-square test, this may come across as confusing. But the idea behind chi-square test is actually really simple and I encourage you to read about it. There are plenty of resources online, so I won't go into the deep details behind it. For now, just know that all four of these assumptions are satisfied and that we can indeed perform this test. # The implementation is simple. We construct the observed contingency table for our two variables and then feed it into the `chi2_contingency` function from `scipy.stats`. The function will return the test statistics, p-value, degree of freedom and the expected contingency table. 
# construct observed contingency table retention_1_observed = np.array( [ [ sum(control["retention_1"]), len(control["retention_1"]) - sum(control["retention_1"]), ], [ sum(treatment["retention_1"]), len(treatment["retention_1"]) - sum(treatment["retention_1"]), ], ] ) retention_1_observed # return test statistics, p-value, df and expected contingency table ( retention_1_chi2_stat, retention_1_chi2_p, retention_1_dof, retention_1_expected, ) = stats.chi2_contingency(retention_1_observed) # print the results print("Chi-square test statistics for retention_1: ", retention_1_chi2_stat) print("P-value for retention_1: ", retention_1_chi2_p) print("Degree of freedom: ", retention_1_dof) print(retention_1_expected) if retention_1_chi2_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (The two proportions may be the same)" ) else: print( "There is enough evidence to reject the null hypothesis (The two proportions may be different)" ) # The result of the chi-square test is consistent with the two-sample Z test for proportions. At a 0.05 level of significance, there is not enough evidence to reject the null hypothesis, so we can conclude that there are no relationship between `version` and `retention_1` (they are independent). # ## Retention 7 # Phew! Now we're finally down to our final variable, `retention_7`. This variable is essentially the same thing as `retention_1`. But remember that during the data visualization stage, we said that there could potentially be a significant difference in `retention_7` proportion between the two versions. Let's do the same two-sample Z test for proportions and chi-square test to check if our hypothesis is correct. # ### Two-sample Z Test for Proportions retention_7_successes = np.array( [sum(control["retention_7"]), sum(treatment["retention_7"])] ) retention_7_nobs = np.array( [len(control["retention_7"]), len(treatment["retention_7"])] ) retention_7_successes, retention_7_nobs retention_7_stat, retention_7_p = proportions_ztest( count=retention_7_successes, nobs=retention_7_nobs ) print("retention_7 Z test statistics two sample proportions:", retention_7_stat) print("retention_7 p-value:", retention_7_p) if retention_7_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (The two proportions may be the same)" ) else: print( "There is enough evidence to reject the null hypothesis (The two proportions may be different)" ) # ### Chi-squared Test retention_7_observed = np.array( [ [ sum(control["retention_7"]), len(control["retention_7"]) - sum(control["retention_7"]), ], [ sum(treatment["retention_7"]), len(treatment["retention_7"]) - sum(treatment["retention_7"]), ], ] ) retention_7_observed ( retention_7_chi2_stat, retention_7_chi2_p, retention_7_dof, retention_7_expected, ) = stats.chi2_contingency(retention_7_observed) print("Chi-square test statistics for retention_7: ", retention_7_chi2_stat) print("P-value for retention_7: ", retention_7_chi2_p) print("Degree of freedom: ", retention_7_dof) print(retention_7_expected) if retention_7_chi2_p > 0.05: print( "There is not enough evidence to reject the null hypothesis (The two proportions may be the same)" ) else: print( "There is enough evidence to reject the null hypothesis (The two proportions may be different)" )
false
1
6,178
0
6,250
6,178
129456040
<jupyter_start><jupyter_text>BirdCLEF2021 background noise Kaggle dataset identifier: birdclef2021-background-noise <jupyter_script>import os, sys, re, glob, random import pandas as pd import librosa as lb import IPython.display as ipd import soundfile as sf import numpy as np import ast, joblib, pickle from pathlib import Path import librosa.display from sklearn import preprocessing # Deep learning from pytorch import torch, torchaudio import torchvision from torch.utils.data import DataLoader, Dataset import torch.optim as optim from torchvision import transforms import torch.nn as nn import torch.nn.functional as F from timm.scheduler import CosineLRScheduler from albumentations.core.transforms_interface import ImageOnlyTransform from torchlibrosa.stft import LogmelFilterBank, Spectrogram from torchlibrosa.augmentation import SpecAugmentation from tqdm import tqdm from torch.nn.parameter import Parameter import copy, codecs import sklearn.metrics import audiomentations as AA import matplotlib.pyplot as plt train_aug = AA.Compose( [ AA.AddBackgroundNoise( sounds_path="/kaggle/input/birdclef2021-background-noise/ff1010bird_nocall/nocall", min_snr_in_db=5, max_snr_in_db=10, p=0.5, ), AA.AddBackgroundNoise( sounds_path="/kaggle/input/birdclef2021-background-noise/train_soundscapes/nocall", min_snr_in_db=5, max_snr_in_db=10, p=0.5, ), AA.AddBackgroundNoise( sounds_path="/kaggle/input/birdclef2021-background-noise/aicrowd2020_noise_30sec/noise_30sec", min_snr_in_db=5, max_snr_in_db=10, p=0.75, ), AA.AddBackgroundNoise( sounds_path="/kaggle/input/birdclef2023esc50-sample/useesc50", min_snr_in_db=5, max_snr_in_db=10, p=0.75, ), AA.AddGaussianSNR(min_snr_in_db=5, max_snr_in_db=10.0, p=0.25), ] ) class WaveformDataset(Dataset): def __init__( self, CFG, df: pd.DataFrame, period=5, prilabelp=1.0, seclabelp=0.5, mixup_prob=0.15, smooth=0.005, mask=None, ): self.df = df.reset_index(drop=True) self.CFG = CFG self.aug = train_aug self.sr = CFG.sr self.period = period self.df["sort_index"] = self.df.index self.smooth = smooth self.prilabelp = prilabelp - self.smooth self.seclabelp = seclabelp - self.smooth self.train = train if mask is not None: print("set mask") self.mask = mask else: self.mask = None self.period_idx = int(self.period * 100) self.mel = torchaudio.transforms.MelSpectrogram( n_mels=CFG.n_mel, sample_rate=CFG.sr, f_min=CFG.fmin, f_max=CFG.fmax, n_fft=CFG.n_fft, hop_length=CFG.hop_len, norm=None, power=CFG.power, mel_scale="htk", ) self.ptodb = torchaudio.transforms.AmplitudeToDB(top_db=CFG.top_db) # Matrix Factorization (サブラベル同士は相関なしとして扱う) self.mfdf = ( self.df[(self.df.sec_num > 0)][["label_id", "labels_id"]] .explode("labels_id") .reset_index(drop=True) ) # mixupするlabel_idリストを作成する self.mixup_idlist = ( self.mfdf.groupby("label_id").labels_id.apply(list).to_dict() ) # mixupする先はシングルラベルにする sdf = self.df[(self.df.sec_num == 0) | (self.df.primary_label == "lotcor1")] # label_idリストからレコード番号を取得し、レコード番号からランダムサンプリングする self.id2record = sdf.groupby("label_id").sort_index.apply(list) self.train = self.df[self.df.sec > 10].reset_index(drop=True) def crop_or_pad(self, y, length, is_train=False, start=None): if len(y) < length // 2: if is_train: wid = length // 2 - len(y) start = np.random.randint(length // 2, length // 2 + wid) y_cp = np.zeros(length, dtype=np.float32) y_cp[start : start + len(y)] = y y = y_cp else: y = np.concatenate([y, np.zeros(length - len(y))]) elif len(y) < length: y = np.concatenate([y, np.zeros(length - len(y))]) elif len(y) > length: if not is_train: start = start or 0 else: 
start = start or np.random.randint(len(y) - length) y = y[start : start + length] return y def __len__(self): return len(self.train) def get_audio(self, row, offset=0): # データ読み込み data, sr = librosa.load(row.audio_paths, sr=self.sr, mono=True) # augemnt1 if random.uniform(0, 1) < row.weight: data = self.aug(samples=data, sample_rate=sr) # test datasetの最大長 max_sec = len(data) // sr # 0秒の場合は1秒として取り扱う max_sec = 1 if max_sec == 0 else max_sec return data def load_audio(self, row): data = self.get_audio(row) labels = torch.zeros(self.CFG.CLASS_NUM, dtype=torch.float32) + self.smooth if row.sec_num != 0: labels[row.labels_id] = self.seclabelp if row.label_id != -1: labels[row.label_id] = self.prilabelp return data, labels def __getitem__(self, idx): row = self.train.iloc[idx] audio1, label1 = self.load_audio(row) if row.label_id in list(self.mixup_idlist.keys()): # FMからペアとなるラベルIDを取得 pair_label_id = np.random.choice(self.mixup_idlist[row.label_id]) pair_idx = np.random.choice(self.id2record[pair_label_id]) row2 = self.df.iloc[pair_idx] audio2, label2 = self.load_audio(row2) label = torch.stack([label1, label2]) else: audio2 = audio1 label = torch.stack([label1, label1]) weight = torch.tensor(row.weight, dtype=torch.float32) return audio1, audio2, label, weight class Collate: def __init__(self, CFG): self.CFG = CFG self.unit = self.CFG.period * self.CFG.sr self.cutoff = self.CFG.factors * self.CFG.frame self.ptodb = torchaudio.transforms.AmplitudeToDB(top_db=CFG.top_db) self.random_state = np.random.RandomState(42) self.mixupalpha_out = CFG.mixup_alpha_out self.mixupalpha_in = CFG.mixup_alpha_in def get_lambda(self, batch_size, mixup_alpha): lams = [] inv_lams = [] for _ in range(batch_size): lam = self.random_state.beta(mixup_alpha, mixup_alpha, 1)[0] lams.append(lam) inv_lams.append(1.0 - lam) return torch.tensor(lams, dtype=torch.float32), torch.tensor( inv_lams, dtype=torch.float32 ) def crop_or_pad(self, y, length, start=None, is_train=False): unit_len = min(length, self.unit * 3) if len(y) < unit_len: start = np.random.randint(0, unit_len - len(y)) y_unit = np.zeros(unit_len, dtype=np.float32) y_unit[start : start + len(y)] = y n_repeats = length // unit_len epsilon = length % unit_len y = np.concatenate([y_unit] * n_repeats + [y_unit[:epsilon]]) elif len(y) < length: y = np.concatenate([y, np.zeros(length - len(y))]) elif len(y) > length: if not is_train: start = start or 0 else: start = start or np.random.randint(len(y) - length) y = y[start : start + length] return torch.tensor(y, dtype=torch.float32) def wavtoimg(self, wavs, time_max): wavs = torch.stack([self.crop_or_pad(wav, length=time_max) for wav in wavs]) power = random.uniform(self.CFG.augpower_min, self.CFG.augpower_min) melimg = torchaudio.transforms.MelSpectrogram( n_mels=self.CFG.n_mel, sample_rate=self.CFG.sr, f_min=self.CFG.fmin, f_max=self.CFG.fmax, n_fft=self.CFG.n_fft, hop_length=self.CFG.hop_len, norm=None, power=power, mel_scale="htk", )(wavs) dbimg = self.ptodb(melimg) img = (dbimg.to(torch.float32) + 80) / 80 return img[:, :, :-1] def inner_mixup(self, x, x_mix, batch_size, factor): perms = torch.randperm(factor).to(x.device) for i, perm in enumerate(perms): x_mix[:, :, i * self.CFG.frame : (i + 1) * self.CFG.frame] = x[ :, :, perm * self.CFG.frame : (perm + 1) * self.CFG.frame ] lam1, lam2 = self.get_lambda(batch_size, self.mixupalpha_in) lam1, lam2 = lam1.to(x.device), lam2.to(x.device) x = lam1[:, None, None] * x + lam2[:, None, None] * x_mix return x def __call__(self, batch): audios1, audios2, labels, weights = 
list(zip(*batch)) labels = torch.stack(labels) weights = torch.stack(weights) # calculate max time length of this batch # time_array = np.append( # np.array([len(ad) for ad in audios1]), # np.array([len(ad) for ad in audios2]), # axis=0) # print(time_array) # #frame Nomalization # time_max = max(self.unit, time_array.max()//self.unit * self.unit) # time_max = min(self.unit*self.CFG.factors, time_max) img1 = self.wavtoimg(audios1, self.cutoff) img2 = self.wavtoimg(audios2, self.cutoff) if random.uniform(0, 1) < self.CFG.mixup_in_prob1: img1 = self.inner_mixup(img1, img1, len(batch), time_max // self.unit) if random.uniform(0, 1) < self.CFG.mixup_in_prob2: img2 = self.inner_mixup(img2, img2, len(batch), time_max // self.unit) if random.uniform(0, 1) < self.CFG.mixup_out_prob: lam1, lam2 = self.get_lambda(len(batch), self.mixupalpha_out) lam1, lam2 = lam1.to(img1.device), lam2.to(img1.device) imgs = lam1[:, None, None] * img1 + lam2[:, None, None] * img2 labels = lam1[:, None] * labels[:, 0, :] + lam2[:, None] * labels[:, 1, :] else: imgs = img1 labels = labels[:, 0, :] # setting cutoff imgs = imgs[:, :, : self.cutoff] return audios1, audios2, img1, img2, imgs, labels, weights train = pd.read_csv("/kaggle/input/birdclef2023-split-creating-dataframe/train.csv") train["labels_id"] = train.labels_id.apply(eval) train["filename"] = train.filename.apply(lambda x: x.split("/")[-1]) pdf = pd.DataFrame( glob.glob("/kaggle/input/birdclef-2023/train_audio/**/*.ogg"), columns=["audio_paths"], ) pdf["filename"] = pdf.apply(lambda x: x["audio_paths"].split("/")[-1], axis=1) train = pd.merge(train, pdf, on=["filename"]) train["weight"] = 1 # with open('/kaggle/input/birdclef2023-soundobjectdetectionanalysis/mask_array.pkl', 'rb') as f: # mask_array = pickle.load(f) class CFG: CLASS_NUM = 264 # image parameter sr = 32000 period = 5 n_mel = 128 fmin = 50 fmax = 14000 power = 2 top_db = None prilabelp = 1.0 seclabelp = 0.5 frame = 500 augpower_min = 1.9 augpower_max = 2.1 mixup_in_prob1 = 1.0 mixup_in_prob2 = 1.0 mixup_out_prob = 0.15 mixup_in_prob = 1.0 backbone_dropout = 0.2 backbone_droppath = 0.2 head_dropout = 0.2 mixup_alpha_in = 5.0 mixup_alpha_out = 5.0 sample_size = 300 # time_len = sr[1/s] * time[s] /hop_len = sr[1/s] * time[s] 4/n_fft n_fft = 1024 hop_len = 320 # バッチサイズ batch_size = 20 # 前処理CPUコア数 workers = 20 # 学習率 (best range 5e-9~2e-4) lr = 5e-3 # スケジューラーの最小学習率 min_lr = 1e-5 # ウォームアップステップ warmupstep = 0 # エポック数 epochs = 40 # factor update # factors = list([15,14,13,12,11,10,10,9,9,9,8,8,8,7,7,7,6,6,6]) + list([max(1, 6 - i//3) for i in range(30)]) factors = 6 # + list([max(1, 6 - i//3) for i in range(epochs)]) batch_factor = {9: 1, 8: 1, 7: 1, 6: 1, 5: 2, 4: 2, 3: 3, 2: 3, 1: 3} # lr ratio (best fit 3) lr_ratio = 5 # label smoothing rate smooth = 0.005 ds = WaveformDataset(CFG, train, mask=None, period=30) collate_fn = Collate(CFG) train_loader = DataLoader( ds, batch_size=1, drop_last=True, pin_memory=True, shuffle=True, num_workers=1, collate_fn=collate_fn, ) for idx, (a1, a2, img1, img2, img, label, weight) in enumerate(train_loader): # fig, axes = plt.subplots(2, 2, figsize=(20, 5),dpi=300) # for i in range(2): # i=0 # for j in range(2): # axes[i, j].imshow(img[i][j]) # axes[2*i+1, j].imshow(img[i][j])#*mask[i, j]) # タイトルと軸ラベルを重ならないように調整 # plt.tight_layout() # plt.figure(figsize=(20,5),dpi=300) plt.imshow(img1.numpy()[0]) plt.show() plt.imshow(img2.numpy()[0]) plt.show() plt.figure(figsize=(20, 5), dpi=300) plt.imshow(img.numpy()[0]) plt.show() if idx == 50: break # 
collate_fn.wavtoimg(a1,time_max=32000*5) # import concurrent.futures # from tqdm import tqdm # import librosa # datas = {} # def load_data(row): # data, sr = librosa.load(row.audio_paths, duration=15*5) # return row.filename_id, data # with concurrent.futures.ThreadPoolExecutor() as executor: # results = list(tqdm(executor.map(load_data, train.itertuples(index=False)), total=len(train))) # for filename_id, data in results: # datas[filename_id] = data
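# A stripped-down, self-contained sketch of the mixup blending used in the Collate class above: draw lambda weights from a Beta distribution and blend two spectrogram batches and their label vectors with the same weights. Random tensors stand in for real mel spectrograms here, and the Beta parameter mirrors CFG.mixup_alpha_out; this is an illustration, not the notebook's exact pipeline.
import numpy as np
import torch

rng = np.random.RandomState(0)
batch, n_mel, frames, n_classes = 4, 128, 500, 264

img_a = torch.rand(batch, n_mel, frames)   # stand-in for wavtoimg(audios1, ...)
img_b = torch.rand(batch, n_mel, frames)   # stand-in for wavtoimg(audios2, ...)
label_a = torch.rand(batch, n_classes)
label_b = torch.rand(batch, n_classes)

lam = torch.tensor(rng.beta(5.0, 5.0, size=batch), dtype=torch.float32)
mixed_img = lam[:, None, None] * img_a + (1 - lam)[:, None, None] * img_b
mixed_label = lam[:, None] * label_a + (1 - lam)[:, None] * label_b
print(mixed_img.shape, mixed_label.shape)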
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/456/129456040.ipynb
birdclef2021-background-noise
christofhenkel
[{"Id": 129456040, "ScriptId": 38260783, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6220676, "CreationDate": "05/14/2023 01:47:13", "VersionNumber": 1.0, "Title": "[EDA]DatasetEDA", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 433.0, "LinesInsertedFromPrevious": 433.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185525853, "KernelVersionId": 129456040, "SourceDatasetVersionId": 2315253}, {"Id": 185525854, "KernelVersionId": 129456040, "SourceDatasetVersionId": 5411772}]
[{"Id": 2315253, "DatasetId": 1389941, "DatasourceVersionId": 2356678, "CreatorUserId": 1424766, "LicenseName": "CC0: Public Domain", "CreationDate": "06/08/2021 18:58:35", "VersionNumber": 2.0, "Title": "BirdCLEF2021 background noise", "Slug": "birdclef2021-background-noise", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Updated data", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1389941, "CreatorUserId": 1424766, "OwnerUserId": 1424766.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2315253.0, "CurrentDatasourceVersionId": 2356678.0, "ForumId": 1409170, "Type": 2, "CreationDate": "06/05/2021 07:27:13", "LastActivityDate": "06/05/2021", "TotalViews": 2187, "TotalDownloads": 275, "TotalVotes": 9, "TotalKernels": 5}]
[{"Id": 1424766, "UserName": "christofhenkel", "DisplayName": "Dieter", "RegisterDate": "11/17/2017", "PerformanceTier": 4}]
import os, sys, re, glob, random import pandas as pd import librosa as lb import IPython.display as ipd import soundfile as sf import numpy as np import ast, joblib, pickle from pathlib import Path import librosa.display from sklearn import preprocessing # Deep learning from pytorch import torch, torchaudio import torchvision from torch.utils.data import DataLoader, Dataset import torch.optim as optim from torchvision import transforms import torch.nn as nn import torch.nn.functional as F from timm.scheduler import CosineLRScheduler from albumentations.core.transforms_interface import ImageOnlyTransform from torchlibrosa.stft import LogmelFilterBank, Spectrogram from torchlibrosa.augmentation import SpecAugmentation from tqdm import tqdm from torch.nn.parameter import Parameter import copy, codecs import sklearn.metrics import audiomentations as AA import matplotlib.pyplot as plt train_aug = AA.Compose( [ AA.AddBackgroundNoise( sounds_path="/kaggle/input/birdclef2021-background-noise/ff1010bird_nocall/nocall", min_snr_in_db=5, max_snr_in_db=10, p=0.5, ), AA.AddBackgroundNoise( sounds_path="/kaggle/input/birdclef2021-background-noise/train_soundscapes/nocall", min_snr_in_db=5, max_snr_in_db=10, p=0.5, ), AA.AddBackgroundNoise( sounds_path="/kaggle/input/birdclef2021-background-noise/aicrowd2020_noise_30sec/noise_30sec", min_snr_in_db=5, max_snr_in_db=10, p=0.75, ), AA.AddBackgroundNoise( sounds_path="/kaggle/input/birdclef2023esc50-sample/useesc50", min_snr_in_db=5, max_snr_in_db=10, p=0.75, ), AA.AddGaussianSNR(min_snr_in_db=5, max_snr_in_db=10.0, p=0.25), ] ) class WaveformDataset(Dataset): def __init__( self, CFG, df: pd.DataFrame, period=5, prilabelp=1.0, seclabelp=0.5, mixup_prob=0.15, smooth=0.005, mask=None, ): self.df = df.reset_index(drop=True) self.CFG = CFG self.aug = train_aug self.sr = CFG.sr self.period = period self.df["sort_index"] = self.df.index self.smooth = smooth self.prilabelp = prilabelp - self.smooth self.seclabelp = seclabelp - self.smooth self.train = train if mask is not None: print("set mask") self.mask = mask else: self.mask = None self.period_idx = int(self.period * 100) self.mel = torchaudio.transforms.MelSpectrogram( n_mels=CFG.n_mel, sample_rate=CFG.sr, f_min=CFG.fmin, f_max=CFG.fmax, n_fft=CFG.n_fft, hop_length=CFG.hop_len, norm=None, power=CFG.power, mel_scale="htk", ) self.ptodb = torchaudio.transforms.AmplitudeToDB(top_db=CFG.top_db) # Matrix Factorization (サブラベル同士は相関なしとして扱う) self.mfdf = ( self.df[(self.df.sec_num > 0)][["label_id", "labels_id"]] .explode("labels_id") .reset_index(drop=True) ) # mixupするlabel_idリストを作成する self.mixup_idlist = ( self.mfdf.groupby("label_id").labels_id.apply(list).to_dict() ) # mixupする先はシングルラベルにする sdf = self.df[(self.df.sec_num == 0) | (self.df.primary_label == "lotcor1")] # label_idリストからレコード番号を取得し、レコード番号からランダムサンプリングする self.id2record = sdf.groupby("label_id").sort_index.apply(list) self.train = self.df[self.df.sec > 10].reset_index(drop=True) def crop_or_pad(self, y, length, is_train=False, start=None): if len(y) < length // 2: if is_train: wid = length // 2 - len(y) start = np.random.randint(length // 2, length // 2 + wid) y_cp = np.zeros(length, dtype=np.float32) y_cp[start : start + len(y)] = y y = y_cp else: y = np.concatenate([y, np.zeros(length - len(y))]) elif len(y) < length: y = np.concatenate([y, np.zeros(length - len(y))]) elif len(y) > length: if not is_train: start = start or 0 else: start = start or np.random.randint(len(y) - length) y = y[start : start + length] return y def __len__(self): return len(self.train) 
def get_audio(self, row, offset=0): # データ読み込み data, sr = librosa.load(row.audio_paths, sr=self.sr, mono=True) # augemnt1 if random.uniform(0, 1) < row.weight: data = self.aug(samples=data, sample_rate=sr) # test datasetの最大長 max_sec = len(data) // sr # 0秒の場合は1秒として取り扱う max_sec = 1 if max_sec == 0 else max_sec return data def load_audio(self, row): data = self.get_audio(row) labels = torch.zeros(self.CFG.CLASS_NUM, dtype=torch.float32) + self.smooth if row.sec_num != 0: labels[row.labels_id] = self.seclabelp if row.label_id != -1: labels[row.label_id] = self.prilabelp return data, labels def __getitem__(self, idx): row = self.train.iloc[idx] audio1, label1 = self.load_audio(row) if row.label_id in list(self.mixup_idlist.keys()): # FMからペアとなるラベルIDを取得 pair_label_id = np.random.choice(self.mixup_idlist[row.label_id]) pair_idx = np.random.choice(self.id2record[pair_label_id]) row2 = self.df.iloc[pair_idx] audio2, label2 = self.load_audio(row2) label = torch.stack([label1, label2]) else: audio2 = audio1 label = torch.stack([label1, label1]) weight = torch.tensor(row.weight, dtype=torch.float32) return audio1, audio2, label, weight class Collate: def __init__(self, CFG): self.CFG = CFG self.unit = self.CFG.period * self.CFG.sr self.cutoff = self.CFG.factors * self.CFG.frame self.ptodb = torchaudio.transforms.AmplitudeToDB(top_db=CFG.top_db) self.random_state = np.random.RandomState(42) self.mixupalpha_out = CFG.mixup_alpha_out self.mixupalpha_in = CFG.mixup_alpha_in def get_lambda(self, batch_size, mixup_alpha): lams = [] inv_lams = [] for _ in range(batch_size): lam = self.random_state.beta(mixup_alpha, mixup_alpha, 1)[0] lams.append(lam) inv_lams.append(1.0 - lam) return torch.tensor(lams, dtype=torch.float32), torch.tensor( inv_lams, dtype=torch.float32 ) def crop_or_pad(self, y, length, start=None, is_train=False): unit_len = min(length, self.unit * 3) if len(y) < unit_len: start = np.random.randint(0, unit_len - len(y)) y_unit = np.zeros(unit_len, dtype=np.float32) y_unit[start : start + len(y)] = y n_repeats = length // unit_len epsilon = length % unit_len y = np.concatenate([y_unit] * n_repeats + [y_unit[:epsilon]]) elif len(y) < length: y = np.concatenate([y, np.zeros(length - len(y))]) elif len(y) > length: if not is_train: start = start or 0 else: start = start or np.random.randint(len(y) - length) y = y[start : start + length] return torch.tensor(y, dtype=torch.float32) def wavtoimg(self, wavs, time_max): wavs = torch.stack([self.crop_or_pad(wav, length=time_max) for wav in wavs]) power = random.uniform(self.CFG.augpower_min, self.CFG.augpower_min) melimg = torchaudio.transforms.MelSpectrogram( n_mels=self.CFG.n_mel, sample_rate=self.CFG.sr, f_min=self.CFG.fmin, f_max=self.CFG.fmax, n_fft=self.CFG.n_fft, hop_length=self.CFG.hop_len, norm=None, power=power, mel_scale="htk", )(wavs) dbimg = self.ptodb(melimg) img = (dbimg.to(torch.float32) + 80) / 80 return img[:, :, :-1] def inner_mixup(self, x, x_mix, batch_size, factor): perms = torch.randperm(factor).to(x.device) for i, perm in enumerate(perms): x_mix[:, :, i * self.CFG.frame : (i + 1) * self.CFG.frame] = x[ :, :, perm * self.CFG.frame : (perm + 1) * self.CFG.frame ] lam1, lam2 = self.get_lambda(batch_size, self.mixupalpha_in) lam1, lam2 = lam1.to(x.device), lam2.to(x.device) x = lam1[:, None, None] * x + lam2[:, None, None] * x_mix return x def __call__(self, batch): audios1, audios2, labels, weights = list(zip(*batch)) labels = torch.stack(labels) weights = torch.stack(weights) # calculate max time length of this batch # time_array = 
np.append( # np.array([len(ad) for ad in audios1]), # np.array([len(ad) for ad in audios2]), # axis=0) # print(time_array) # #frame Nomalization # time_max = max(self.unit, time_array.max()//self.unit * self.unit) # time_max = min(self.unit*self.CFG.factors, time_max) img1 = self.wavtoimg(audios1, self.cutoff) img2 = self.wavtoimg(audios2, self.cutoff) if random.uniform(0, 1) < self.CFG.mixup_in_prob1: img1 = self.inner_mixup(img1, img1, len(batch), time_max // self.unit) if random.uniform(0, 1) < self.CFG.mixup_in_prob2: img2 = self.inner_mixup(img2, img2, len(batch), time_max // self.unit) if random.uniform(0, 1) < self.CFG.mixup_out_prob: lam1, lam2 = self.get_lambda(len(batch), self.mixupalpha_out) lam1, lam2 = lam1.to(img1.device), lam2.to(img1.device) imgs = lam1[:, None, None] * img1 + lam2[:, None, None] * img2 labels = lam1[:, None] * labels[:, 0, :] + lam2[:, None] * labels[:, 1, :] else: imgs = img1 labels = labels[:, 0, :] # setting cutoff imgs = imgs[:, :, : self.cutoff] return audios1, audios2, img1, img2, imgs, labels, weights train = pd.read_csv("/kaggle/input/birdclef2023-split-creating-dataframe/train.csv") train["labels_id"] = train.labels_id.apply(eval) train["filename"] = train.filename.apply(lambda x: x.split("/")[-1]) pdf = pd.DataFrame( glob.glob("/kaggle/input/birdclef-2023/train_audio/**/*.ogg"), columns=["audio_paths"], ) pdf["filename"] = pdf.apply(lambda x: x["audio_paths"].split("/")[-1], axis=1) train = pd.merge(train, pdf, on=["filename"]) train["weight"] = 1 # with open('/kaggle/input/birdclef2023-soundobjectdetectionanalysis/mask_array.pkl', 'rb') as f: # mask_array = pickle.load(f) class CFG: CLASS_NUM = 264 # image parameter sr = 32000 period = 5 n_mel = 128 fmin = 50 fmax = 14000 power = 2 top_db = None prilabelp = 1.0 seclabelp = 0.5 frame = 500 augpower_min = 1.9 augpower_max = 2.1 mixup_in_prob1 = 1.0 mixup_in_prob2 = 1.0 mixup_out_prob = 0.15 mixup_in_prob = 1.0 backbone_dropout = 0.2 backbone_droppath = 0.2 head_dropout = 0.2 mixup_alpha_in = 5.0 mixup_alpha_out = 5.0 sample_size = 300 # time_len = sr[1/s] * time[s] /hop_len = sr[1/s] * time[s] 4/n_fft n_fft = 1024 hop_len = 320 # バッチサイズ batch_size = 20 # 前処理CPUコア数 workers = 20 # 学習率 (best range 5e-9~2e-4) lr = 5e-3 # スケジューラーの最小学習率 min_lr = 1e-5 # ウォームアップステップ warmupstep = 0 # エポック数 epochs = 40 # factor update # factors = list([15,14,13,12,11,10,10,9,9,9,8,8,8,7,7,7,6,6,6]) + list([max(1, 6 - i//3) for i in range(30)]) factors = 6 # + list([max(1, 6 - i//3) for i in range(epochs)]) batch_factor = {9: 1, 8: 1, 7: 1, 6: 1, 5: 2, 4: 2, 3: 3, 2: 3, 1: 3} # lr ratio (best fit 3) lr_ratio = 5 # label smoothing rate smooth = 0.005 ds = WaveformDataset(CFG, train, mask=None, period=30) collate_fn = Collate(CFG) train_loader = DataLoader( ds, batch_size=1, drop_last=True, pin_memory=True, shuffle=True, num_workers=1, collate_fn=collate_fn, ) for idx, (a1, a2, img1, img2, img, label, weight) in enumerate(train_loader): # fig, axes = plt.subplots(2, 2, figsize=(20, 5),dpi=300) # for i in range(2): # i=0 # for j in range(2): # axes[i, j].imshow(img[i][j]) # axes[2*i+1, j].imshow(img[i][j])#*mask[i, j]) # タイトルと軸ラベルを重ならないように調整 # plt.tight_layout() # plt.figure(figsize=(20,5),dpi=300) plt.imshow(img1.numpy()[0]) plt.show() plt.imshow(img2.numpy()[0]) plt.show() plt.figure(figsize=(20, 5), dpi=300) plt.imshow(img.numpy()[0]) plt.show() if idx == 50: break # collate_fn.wavtoimg(a1,time_max=32000*5) # import concurrent.futures # from tqdm import tqdm # import librosa # datas = {} # def load_data(row): # data, sr = 
librosa.load(row.audio_paths, duration=15*5) # return row.filename_id, data # with concurrent.futures.ThreadPoolExecutor() as executor: # results = list(tqdm(executor.map(load_data, train.itertuples(index=False)), total=len(train))) # for filename_id, data in results: # datas[filename_id] = data
false
1
4,605
0
4,640
4,605
129456876
<jupyter_start><jupyter_text>Amazon Books Reviews ![ERD Digram](https://i.imgur.com/9ELRD7G.png) This dataset contains 2 files as you see in the figure above <br> The first file** reviews** file contain feedback about 3M user on 212404 unique books the data set is part of the Amazon review Dataset it contains product reviews and metadata from Amazon, including 142.8 million reviews spanning May 1996 - July 2014. and this file has these attributes | Features | Description | | --- | --- | | id | The Id of Book | |Title|Book Title| |Price|The price of Book | |User_id|Id of the user who rates the book| |profileName|Name of the user who rates the book| |review/helpfulness|helpfulness rating of the review, e.g. 2/3| |review/score|rating from 0 to 5 for the book| |review/time|time of given the review| |review/summary|the summary of a text review| |review/text|the full text of a review| | | The second file **Books Details** file contains details information about 212404 unique books it file is built by using [google books API](https://developers.google.com/books/docs/overview) to get details information about books it rated in the first file and this file contains | Features | Description | | --- | --- | |Title|Book Title| |Descripe |decription of book | |authors|Neme of book authors| |image|url for book cover| |previewLink|link to access this book on google Books| |publisher|Name of the publisheer| |publishedDate|the date of publish| |infoLink|link to get more information about the book on google books| |categories|genres of books| |ratingsCount|averaging rating for book| | | ## recommender tasks on the data set * recommender system * sentiment analysis * text classification * text clustering * gans generate book cover * data analysis * visualization I wish this dataset by helpful for you 😃😃 Kaggle dataset identifier: amazon-books-reviews <jupyter_script>import pandas as pd import numpy as np from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.model_selection import train_test_split from sklearn.metrics import recall_score from scipy import stats from scipy.sparse import csr_matrix print("loading data...") df = pd.read_csv("/kaggle/input/amazon-books-reviews/Books_rating.csv") df = df[["Id", "User_id", "Title", "review/score", "review/time"]] df.rename( columns={ "Id": "ProductId", "User_id": "UserId", "review/time": "Time", "Title": "title", "review/score": "Score", }, inplace=True, ) df.head() df = df.dropna(subset=["UserId"]) print("Size of 'ProductId' column:", len(df["ProductId"])) print("Size of 'UserId' column:", len(df["UserId"])) # Define the threshold values product_id_threshold = 500 user_id_threshold = 10 # Count the occurrences of ProductId and UserId product_id_counts = df["ProductId"].value_counts() user_id_counts = df["UserId"].value_counts() # Filter out rows below the threshold filtered_df = df[ ( df["ProductId"].isin( product_id_counts[product_id_counts >= product_id_threshold].index ) ) & (df["UserId"].isin(user_id_counts[user_id_counts >= user_id_threshold].index)) ] print("Size of 'ProductId' column:", len(filtered_df["ProductId"])) print("Size of 'UserId' column:", len(filtered_df["UserId"])) # Get unique UserIds and ProductIds unique_user_ids = filtered_df["UserId"].unique() unique_product_ids = filtered_df["ProductId"].unique() # Create a dictionary to map UserIds and ProductIds to indices user_id_to_index = {user_id: index for index, user_id in enumerate(unique_user_ids)} product_id_to_index = { product_id: 
index for index, product_id in enumerate(unique_product_ids) } # Initialize the matrix with zeros matrix = np.zeros((len(unique_user_ids), len(unique_product_ids))) # Fill in the matrix with scores for _, row in filtered_df.iterrows(): user_id = row["UserId"] product_id = row["ProductId"] score = row["Score"] user_index = user_id_to_index[user_id] product_index = product_id_to_index[product_id] if matrix[user_index][product_index] < score: matrix[user_index][product_index] = score print(matrix.shape) matrix matrix = stats.zscore(matrix, axis=0) # Print the shape and values of the normalized matrix # print(matrix.shape) # print(matrix) def calculate_mse(predicted_matrix, test_matrix): num_users = min(predicted_matrix.shape[0], test_matrix.shape[0]) num_items = min(predicted_matrix.shape[1], test_matrix.shape[1]) mse = np.mean( (predicted_matrix[:num_users, :num_items] - test_matrix[:num_users, :num_items]) ** 2 ) return mse def calculate_f1_score(recall, precision): if recall + precision == 0: return 0 f1_score = 2 * (precision * recall) / (precision + recall) return f1_score def precision_at_k(actual_matrix, predicted_matrix, k, threshold): binary_predicted_matrix = predicted_matrix >= threshold precision = [] for i in range(len(actual_matrix)): actual_indices = np.where(actual_matrix[i] >= threshold)[0] predicted_indices = np.argsort(~binary_predicted_matrix[i])[:k] common_indices = np.intersect1d(actual_indices, predicted_indices) precision.append(len(common_indices) / len(predicted_indices)) return np.mean(precision) def recall_at_k(true_matrix, pred_matrix, k, threshold): pred_matrix_sorted = np.argsort(pred_matrix, axis=1)[:, ::-1][:, :k] recall_scores = [] for i in range(len(true_matrix)): true_positives = len( set(pred_matrix_sorted[i]).intersection( set(np.where(true_matrix[i] >= threshold)[0]) ) ) actual_positives = len(np.where(true_matrix[i] >= threshold)[0]) if actual_positives > 0: recall_scores.append(true_positives / actual_positives) recall = np.mean(recall_scores) return recall from scipy.sparse.linalg import svds def split_train_test(matrix, test_size=0.2, random_state=42): train_matrix, test_matrix = train_test_split( matrix, test_size=test_size, random_state=random_state ) return train_matrix, test_matrix def calculate_svd(train_matrix, k=250): train_sparse = csr_matrix(train_matrix) # Perform SVD on the sparse matrix U_train, S_train, VT_train = svds(train_sparse, k=k) # Reverse the singular values, columns of U_train, and rows of VT_train S_train_k = np.diag(S_train[::-1]) U_train_k = U_train[:, ::-1] VT_train_k = VT_train[::-1, :] return U_train_k, S_train_k, VT_train_k # Split the matrix into train and test sets train_matrix, test_matrix = split_train_test(matrix) # Perform SVD on the train matrix U_train, S_train, VT_train = calculate_svd(train_matrix) # Make predictions for the train set U_train_pred = np.dot(train_matrix, VT_train.T) train_pred_matrix = np.dot(U_train_pred, VT_train) # Make predictions for the test set U_test_pred = np.dot(test_matrix, VT_train.T) predicted_matrix = np.dot(U_test_pred, VT_train) # Calculate MSE for the train set train_mse = calculate_mse(train_matrix, train_pred_matrix) # Calculate MSE for the test set test_mse = calculate_mse(test_matrix, predicted_matrix) print("Train Set Mean Squared Error (MSE):", train_mse) print("Test Set Mean Squared Error (MSE):", test_mse) # Calculate Precision at k for the test set precision = precision_at_k(test_matrix, predicted_matrix, k=10, threshold=3) # Calculate Recall at k for the test set 
recall = recall_at_k(test_matrix, predicted_matrix, k=10, threshold=3) # Calculate F1 score f1_score = calculate_f1_score(recall, precision) print("RMSE: ", np.sqrt(test_mse)) print("Precision: ", precision) print("Recall at 10:", recall) print("F1 Score:", f1_score) def fetch_relevant_items_for_user(user_id, relevant_items=5): # Get the index of the user user_index = user_id_to_index[user_id] user_embedding = U_train[user_index, :] similarity_scores = VT_train.T.dot(user_embedding) sorted_indices = similarity_scores.argsort()[::-1] top_relevant_indices = sorted_indices[:relevant_items] relevant_items = [ list(product_id_to_index.keys())[list(product_id_to_index.values()).index(idx)] for idx in top_relevant_indices ] relevant_titles = df.loc[df["ProductId"].isin(relevant_items), "title"].tolist() # Remove any duplicate titles unique_relevant_titles = list(set(relevant_titles)) # Get the final set of relevant items without duplicate titles final_relevant_items = [] for title in unique_relevant_titles: final_relevant_items.append(title) return final_relevant_items user_id = "A1O4UBZ7G4ID65" relevant_items = 5 relevant_items = fetch_relevant_items_for_user(user_id, relevant_items) print(f"User: {user_id}") print("Relevant Items:") for i, item in enumerate(relevant_items): print(f"{i+1}. {item}")
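A minimal sketch of the truncated-SVD step used in calculate_svd above, on a made-up 4x4 ratings matrix: scipy's svds returns the k singular values in ascending order, which is why the notebook reverses them before building the low-rank reconstruction. The toy values and k=2 are illustrative only, not taken from the Amazon data.

# Toy truncated SVD (illustrative data only, mirroring calculate_svd above).
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import svds

toy = np.array([[5.0, 3.0, 0.0, 1.0],
                [4.0, 0.0, 0.0, 1.0],
                [1.0, 1.0, 0.0, 5.0],
                [0.0, 1.0, 5.0, 4.0]])

U, S, VT = svds(csr_matrix(toy), k=2)        # singular values come back in ascending order
U, S, VT = U[:, ::-1], S[::-1], VT[::-1, :]  # reverse to the usual descending layout

reconstruction = U @ np.diag(S) @ VT         # rank-2 approximation used for prediction
print(np.round(reconstruction, 2))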
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/456/129456876.ipynb
amazon-books-reviews
mohamedbakhet
[{"Id": 129456876, "ScriptId": 38479821, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14866635, "CreationDate": "05/14/2023 02:03:11", "VersionNumber": 1.0, "Title": "Reccomendation_System_Amazon_book_Reviews", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 186.0, "LinesInsertedFromPrevious": 186.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185527807, "KernelVersionId": 129456876, "SourceDatasetVersionId": 4200454}]
[{"Id": 4200454, "DatasetId": 2476732, "DatasourceVersionId": 4257402, "CreatorUserId": 5893711, "LicenseName": "CC0: Public Domain", "CreationDate": "09/13/2022 23:04:08", "VersionNumber": 1.0, "Title": "Amazon Books Reviews", "Slug": "amazon-books-reviews", "Subtitle": "Goodreads-books reviews and descriptions of each book", "Description": "![ERD Digram](https://i.imgur.com/9ELRD7G.png)\n \nThis dataset contains 2 files as you see in the figure above \n<br>\nThe first file** reviews** file contain feedback about 3M user on 212404 unique books the data set is part of the Amazon review Dataset it contains product reviews and metadata from Amazon, including 142.8 million reviews spanning May 1996 - July 2014.\nand this file has these attributes\n\n| Features | Description | \n| --- | --- |\n| id | The Id of Book |\n|Title|Book Title| \n|Price|The price of Book |\n|User_id|Id of the user who rates the book|\n|profileName|Name of the user who rates the book|\n|review/helpfulness|helpfulness rating of the review, e.g. 2/3|\n|review/score|rating from 0 to 5 for the book|\n|review/time|time of given the review|\n|review/summary|the summary of a text review|\n|review/text|the full text of a review|\n| | \n\nThe second file **Books Details** file contains details information about 212404 unique books it file is built by using \n[google books API](https://developers.google.com/books/docs/overview) to get details information about books it rated in the first file \nand this file contains \n\n| Features | Description | \n| --- | --- |\n|Title|Book Title|\n|Descripe |decription of book |\n|authors|Neme of book authors|\n|image|url for book cover|\n|previewLink|link to access this book on google Books|\n|publisher|Name of the publisheer|\n|publishedDate|the date of publish|\n|infoLink|link to get more information about the book on google books|\n|categories|genres of books|\n|ratingsCount|averaging rating for book|\n| | \n## recommender tasks on the data set\n* recommender system\n* sentiment analysis \n* text classification \n* text clustering\n* gans generate book cover \n* data analysis \n* visualization \n\nI wish this dataset by helpful for you \ud83d\ude03\ud83d\ude03", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2476732, "CreatorUserId": 5893711, "OwnerUserId": 5893711.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4200454.0, "CurrentDatasourceVersionId": 4257402.0, "ForumId": 2504639, "Type": 2, "CreationDate": "09/13/2022 23:04:08", "LastActivityDate": "09/13/2022", "TotalViews": 39831, "TotalDownloads": 7008, "TotalVotes": 101, "TotalKernels": 15}]
[{"Id": 5893711, "UserName": "mohamedbakhet", "DisplayName": "Mohamed Bekheet", "RegisterDate": "10/05/2020", "PerformanceTier": 2}]
import pandas as pd import numpy as np from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.model_selection import train_test_split from sklearn.metrics import recall_score from scipy import stats from scipy.sparse import csr_matrix print("loading data...") df = pd.read_csv("/kaggle/input/amazon-books-reviews/Books_rating.csv") df = df[["Id", "User_id", "Title", "review/score", "review/time"]] df.rename( columns={ "Id": "ProductId", "User_id": "UserId", "review/time": "Time", "Title": "title", "review/score": "Score", }, inplace=True, ) df.head() df = df.dropna(subset=["UserId"]) print("Size of 'ProductId' column:", len(df["ProductId"])) print("Size of 'UserId' column:", len(df["UserId"])) # Define the threshold values product_id_threshold = 500 user_id_threshold = 10 # Count the occurrences of ProductId and UserId product_id_counts = df["ProductId"].value_counts() user_id_counts = df["UserId"].value_counts() # Filter out rows below the threshold filtered_df = df[ ( df["ProductId"].isin( product_id_counts[product_id_counts >= product_id_threshold].index ) ) & (df["UserId"].isin(user_id_counts[user_id_counts >= user_id_threshold].index)) ] print("Size of 'ProductId' column:", len(filtered_df["ProductId"])) print("Size of 'UserId' column:", len(filtered_df["UserId"])) # Get unique UserIds and ProductIds unique_user_ids = filtered_df["UserId"].unique() unique_product_ids = filtered_df["ProductId"].unique() # Create a dictionary to map UserIds and ProductIds to indices user_id_to_index = {user_id: index for index, user_id in enumerate(unique_user_ids)} product_id_to_index = { product_id: index for index, product_id in enumerate(unique_product_ids) } # Initialize the matrix with zeros matrix = np.zeros((len(unique_user_ids), len(unique_product_ids))) # Fill in the matrix with scores for _, row in filtered_df.iterrows(): user_id = row["UserId"] product_id = row["ProductId"] score = row["Score"] user_index = user_id_to_index[user_id] product_index = product_id_to_index[product_id] if matrix[user_index][product_index] < score: matrix[user_index][product_index] = score print(matrix.shape) matrix matrix = stats.zscore(matrix, axis=0) # Print the shape and values of the normalized matrix # print(matrix.shape) # print(matrix) def calculate_mse(predicted_matrix, test_matrix): num_users = min(predicted_matrix.shape[0], test_matrix.shape[0]) num_items = min(predicted_matrix.shape[1], test_matrix.shape[1]) mse = np.mean( (predicted_matrix[:num_users, :num_items] - test_matrix[:num_users, :num_items]) ** 2 ) return mse def calculate_f1_score(recall, precision): if recall + precision == 0: return 0 f1_score = 2 * (precision * recall) / (precision + recall) return f1_score def precision_at_k(actual_matrix, predicted_matrix, k, threshold): binary_predicted_matrix = predicted_matrix >= threshold precision = [] for i in range(len(actual_matrix)): actual_indices = np.where(actual_matrix[i] >= threshold)[0] predicted_indices = np.argsort(~binary_predicted_matrix[i])[:k] common_indices = np.intersect1d(actual_indices, predicted_indices) precision.append(len(common_indices) / len(predicted_indices)) return np.mean(precision) def recall_at_k(true_matrix, pred_matrix, k, threshold): pred_matrix_sorted = np.argsort(pred_matrix, axis=1)[:, ::-1][:, :k] recall_scores = [] for i in range(len(true_matrix)): true_positives = len( set(pred_matrix_sorted[i]).intersection( set(np.where(true_matrix[i] >= threshold)[0]) ) ) actual_positives = 
len(np.where(true_matrix[i] >= threshold)[0]) if actual_positives > 0: recall_scores.append(true_positives / actual_positives) recall = np.mean(recall_scores) return recall from scipy.sparse.linalg import svds def split_train_test(matrix, test_size=0.2, random_state=42): train_matrix, test_matrix = train_test_split( matrix, test_size=test_size, random_state=random_state ) return train_matrix, test_matrix def calculate_svd(train_matrix, k=250): train_sparse = csr_matrix(train_matrix) # Perform SVD on the sparse matrix U_train, S_train, VT_train = svds(train_sparse, k=k) # Reverse the singular values, columns of U_train, and rows of VT_train S_train_k = np.diag(S_train[::-1]) U_train_k = U_train[:, ::-1] VT_train_k = VT_train[::-1, :] return U_train_k, S_train_k, VT_train_k # Split the matrix into train and test sets train_matrix, test_matrix = split_train_test(matrix) # Perform SVD on the train matrix U_train, S_train, VT_train = calculate_svd(train_matrix) # Make predictions for the train set U_train_pred = np.dot(train_matrix, VT_train.T) train_pred_matrix = np.dot(U_train_pred, VT_train) # Make predictions for the test set U_test_pred = np.dot(test_matrix, VT_train.T) predicted_matrix = np.dot(U_test_pred, VT_train) # Calculate MSE for the train set train_mse = calculate_mse(train_matrix, train_pred_matrix) # Calculate MSE for the test set test_mse = calculate_mse(test_matrix, predicted_matrix) print("Train Set Mean Squared Error (MSE):", train_mse) print("Test Set Mean Squared Error (MSE):", test_mse) # Calculate Precision at k for the test set precision = precision_at_k(test_matrix, predicted_matrix, k=10, threshold=3) # Calculate Recall at k for the test set recall = recall_at_k(test_matrix, predicted_matrix, k=10, threshold=3) # Calculate F1 score f1_score = calculate_f1_score(recall, precision) print("RMSE: ", np.sqrt(test_mse)) print("Precision: ", precision) print("Recall at 10:", recall) print("F1 Score:", f1_score) def fetch_relevant_items_for_user(user_id, relevant_items=5): # Get the index of the user user_index = user_id_to_index[user_id] user_embedding = U_train[user_index, :] similarity_scores = VT_train.T.dot(user_embedding) sorted_indices = similarity_scores.argsort()[::-1] top_relevant_indices = sorted_indices[:relevant_items] relevant_items = [ list(product_id_to_index.keys())[list(product_id_to_index.values()).index(idx)] for idx in top_relevant_indices ] relevant_titles = df.loc[df["ProductId"].isin(relevant_items), "title"].tolist() # Remove any duplicate titles unique_relevant_titles = list(set(relevant_titles)) # Get the final set of relevant items without duplicate titles final_relevant_items = [] for title in unique_relevant_titles: final_relevant_items.append(title) return final_relevant_items user_id = "A1O4UBZ7G4ID65" relevant_items = 5 relevant_items = fetch_relevant_items_for_user(user_id, relevant_items) print(f"User: {user_id}") print("Relevant Items:") for i, item in enumerate(relevant_items): print(f"{i+1}. {item}")
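For reference, a self-contained toy computation of precision@k and recall@k (the arrays, k and threshold are made up); it builds the top-k set by sorting predicted scores, which is one common formulation and slightly different from the thresholded-mask variant in precision_at_k above.

# Toy precision@k / recall@k (hypothetical scores, k=2, threshold=3).
import numpy as np

true_scores = np.array([[5, 0, 4, 1],
                        [0, 3, 0, 5]])
pred_scores = np.array([[4.2, 0.5, 3.9, 1.0],
                        [0.3, 0.2, 2.8, 4.7]])
k, threshold = 2, 3

precisions, recalls = [], []
for t_row, p_row in zip(true_scores, pred_scores):
    top_k = np.argsort(p_row)[::-1][:k]              # indices of the k highest predictions
    relevant = set(np.where(t_row >= threshold)[0])  # items rated at or above the threshold
    hits = len(relevant.intersection(top_k))
    precisions.append(hits / k)
    if relevant:
        recalls.append(hits / len(relevant))

print("precision@k:", np.mean(precisions), "recall@k:", np.mean(recalls))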
false
1
2,146
0
2,702
2,146
129421437
<jupyter_start><jupyter_text>Heart Attack Analysis & Prediction Dataset ## Hone your analytical and ML skills by participating in tasks of my other dataset's. Given below. [Data Science Job Posting on Glassdoor](https://www.kaggle.com/rashikrahmanpritom/data-science-job-posting-on-glassdoor) [Groceries dataset for Market Basket Analysis(MBA)](https://www.kaggle.com/rashikrahmanpritom/groceries-dataset-for-market-basket-analysismba) [Dataset for Facial recognition using ML approach](https://www.kaggle.com/rashikrahmanpritom/dataset-for-facial-recognition-using-ml-approach) [Covid_w/wo_Pneumonia Chest Xray](https://www.kaggle.com/rashikrahmanpritom/covid-wwo-pneumonia-chest-xray) [Disney Movies 1937-2016 Gross Income](https://www.kaggle.com/rashikrahmanpritom/disney-movies-19372016-total-gross) [Bollywood Movie data from 2000 to 2019](https://www.kaggle.com/rashikrahmanpritom/bollywood-movie-data-from-2000-to-2019) [17.7K English song data from 2008-2017](https://www.kaggle.com/rashikrahmanpritom/177k-english-song-data-from-20082017) ## About this dataset - Age : Age of the patient - Sex : Sex of the patient - exang: exercise induced angina (1 = yes; 0 = no) - ca: number of major vessels (0-3) - cp : Chest Pain type chest pain type - Value 1: typical angina - Value 2: atypical angina - Value 3: non-anginal pain - Value 4: asymptomatic - trtbps : resting blood pressure (in mm Hg) - chol : cholestoral in mg/dl fetched via BMI sensor - fbs : (fasting blood sugar &gt; 120 mg/dl) (1 = true; 0 = false) - rest_ecg : resting electrocardiographic results - Value 0: normal - Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of &gt; 0.05 mV) - Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria - thalach : maximum heart rate achieved - target : 0= less chance of heart attack 1= more chance of heart attack n Kaggle dataset identifier: heart-attack-analysis-prediction-dataset <jupyter_code>import pandas as pd df = pd.read_csv('heart-attack-analysis-prediction-dataset/heart.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 303 entries, 0 to 302 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 age 303 non-null int64 1 sex 303 non-null int64 2 cp 303 non-null int64 3 trtbps 303 non-null int64 4 chol 303 non-null int64 5 fbs 303 non-null int64 6 restecg 303 non-null int64 7 thalachh 303 non-null int64 8 exng 303 non-null int64 9 oldpeak 303 non-null float64 10 slp 303 non-null int64 11 caa 303 non-null int64 12 thall 303 non-null int64 13 output 303 non-null int64 dtypes: float64(1), int64(13) memory usage: 33.3 KB <jupyter_text>Examples: { "age": 63.0, "sex": 1.0, "cp": 3.0, "trtbps": 145.0, "chol": 233.0, "fbs": 1.0, "restecg": 0.0, "thalachh": 150.0, "exng": 0.0, "oldpeak": 2.3, "slp": 0.0, "caa": 0.0, "thall": 1.0, "output": 1.0 } { "age": 37.0, "sex": 1.0, "cp": 2.0, "trtbps": 130.0, "chol": 250.0, "fbs": 0.0, "restecg": 1.0, "thalachh": 187.0, "exng": 0.0, "oldpeak": 3.5, "slp": 0.0, "caa": 0.0, "thall": 2.0, "output": 1.0 } { "age": 41.0, "sex": 0.0, "cp": 1.0, "trtbps": 130.0, "chol": 204.0, "fbs": 0.0, "restecg": 0.0, "thalachh": 172.0, "exng": 0.0, "oldpeak": 1.4, "slp": 2.0, "caa": 0.0, "thall": 2.0, "output": 1.0 } { "age": 56.0, "sex": 1.0, "cp": 1.0, "trtbps": 120.0, "chol": 236.0, "fbs": 0.0, "restecg": 1.0, "thalachh": 178.0, "exng": 0.0, "oldpeak": 0.8, "slp": 2.0, "caa": 0.0, "thall": 2.0, "output": 1.0 } <jupyter_script># ## import 
libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # ## Read data data = pd.read_csv("/kaggle/input/heart-attack-analysis-prediction-dataset/heart.csv") # ## define categorical columns and continuous columns cat_cols = ["sex", "exng", "caa", "cp", "fbs", "restecg", "slp", "thall"] con_cols = ["age", "trtbps", "chol", "thalachh", "oldpeak"] target_col = ["output"] print("The categorial cols are : ", cat_cols) print("The continuous cols are : ", con_cols) print("The target variable is : ", target_col) len(data.columns) data.head(30) data[con_cols].describe() data.shape # getting unique values dict = {} for i in list(data.columns): dict[i] = data[i].value_counts().shape[0] pd.DataFrame(dict, index=["unique count"]).transpose() # ## getting correlation corrmat = data.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(corrmat, vmax=0.8, square=True) # ## Creating count plot for categorical data # count plot function for getting outlier def count_plot(cols): i = 1 sns.set() plt.figure() fig, ax = plt.subplots(3, 3, figsize=(12, 9)) ax0 = plt.subplot(3, 3, 1) ax0.spines["bottom"].set_visible(False) ax0.spines["left"].set_visible(False) ax0.spines["top"].set_visible(False) ax0.spines["right"].set_visible(False) ax0.tick_params(left=False, bottom=False) ax0.set_xticklabels([]) ax0.set_yticklabels([]) ax0.text( 0.5, 0.5, f"Count plots\n_________________", horizontalalignment="center", verticalalignment="center", fontsize=18, fontweight="bold", fontfamily="serif", color="#000000", ) for feature in cols: i += 1 plt.subplot(3, 3, i) sns.countplot(x=data[feature]) plt.xlabel(feature, fontsize=9) fig.tight_layout() plt.show() count_plot(cat_cols) # ## Creating box plot for continuous data # box plot function to get outliers def box_plot(cols): i = 1 sns.set() plt.figure() fig, ax = plt.subplots(3, 2, figsize=(12, 9)) ax0 = plt.subplot(3, 2, 1) ax0.spines["bottom"].set_visible(True) ax0.spines["left"].set_visible(False) ax0.spines["top"].set_visible(False) ax0.spines["right"].set_visible(False) ax0.tick_params(left=False, bottom=False) ax0.set_xticklabels([]) ax0.set_yticklabels([]) ax0.text( 0.5, 0.5, f"box plots\n_________________", horizontalalignment="center", verticalalignment="center", fontsize=18, fontweight="bold", fontfamily="serif", color="#000000", ) for feature in cols: i += 1 plt.subplot(3, 2, i) sns.boxplot(x=data[feature]) plt.xlabel(feature, fontsize=9) fig.tight_layout() plt.show() box_plot(con_cols) # ## removing outlier # # removing outlier for col in con_cols: q1 = data[col].quantile(0.25) print(f"q1 of {col} is {q1}") q3 = data[col].quantile(0.75) print(f"q3 of {col} is {q3}") iqr = q3 - q1 print(f"iqr of {col} is {iqr}") upper_limit = q3 + (1.5 * iqr) print(f"upper limit in {col} is {upper_limit}") data = data.drop(data[data[col] > upper_limit].index) print("*" * 30) data.shape box_plot(con_cols) sns.pairplot(data, hue="output") plt.show() chance_of_heart_attack = sns.countplot(x=data["output"]).set_xticklabels( ["hight chance", "low chance"] ) # ## Importing Models # # Scaling from sklearn.preprocessing import RobustScaler # Train Test Split from sklearn.model_selection import train_test_split # Models import torch import torch.nn as nn from sklearn.svm import SVC from sklearn.pipeline import Pipeline from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble 
import GradientBoostingClassifier # Metrics from sklearn.metrics import ( accuracy_score, classification_report, roc_curve, confusion_matrix, ) # Cross Validation from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV # ## encoding and scaling data # encoding the categorical columns data = pd.get_dummies(data, columns=cat_cols, drop_first=True) # defining the features and target X = data.drop(["output"], axis=1) y = data[["output"]] # instantiating the scaler scaler = RobustScaler() # scaling the continuous featuree X[con_cols] = scaler.fit_transform(X[con_cols]) print("The first 5 rows of X are") X.head() print(f"data shape = {data.shape}") print(f"scaled data shape = {X.shape}") X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) print("The shape of X_train is ", X_train.shape) print("The shape of X_test is ", X_test.shape) print("The shape of y_train is ", y_train.shape) print("The shape of y_test is ", y_test.shape) # ## models # support vector machines clf = SVC(kernel="linear", C=1, random_state=42) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # printing the test accuracy print( "The test accuracy score of Logistric Regression is ", accuracy_score(y_test, y_pred), ) print( "The test report of Logistric Regression is", classification_report(y_test, y_pred) ) print("the confusion matrix is : \n", confusion_matrix(y_test, y_pred)) # logistic regression model logreg = LogisticRegression() logreg.fit(X_train, y_train) y_pred_proba = logreg.predict_proba(X_test) y_pred = np.argmax(y_pred_proba, axis=1) # printing the test accuracy print( "The test accuracy score of Logistric Regression is ", accuracy_score(y_test, y_pred), ) print( "The test report of Logistric Regression is", classification_report(y_test, y_pred) ) print("the confusion matrix is : \n", confusion_matrix(y_test, y_pred)) # knn model from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier() knn.fit(X_train, y_train) pred = knn.predict(X_test) # printing the test accuracy print( "The test accuracy score of Logistric Regression is ", accuracy_score(y_test, y_pred), ) print( "The test report of Logistric Regression is", classification_report(y_test, y_pred) ) print("the confusion matrix is : \n", confusion_matrix(y_test, y_pred)) # calculating the probabilities y_pred_prob = logreg.predict_proba(X_test)[:, 1] fpr, tpr, threshols = roc_curve(y_test, y_pred_prob) # plotting the curve plt.plot([0, 1], [0, 1], "k--", "r+") plt.plot(fpr, tpr, label="Logistic Regression") plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Logistric Regression ROC Curve") plt.show() # Random forest model rf = RandomForestClassifier() rf.fit(X_train, y_train) y_pred = rf.predict(X_test) # printing the test accuracy print( "The test accuracy score of Logistric Regression is ", accuracy_score(y_test, y_pred), ) print( "The test report of Logistric Regression is", classification_report(y_test, y_pred) ) print("the confusion matrix is : \n", confusion_matrix(y_test, y_pred))
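The outlier-removal loop above drops rows whose value exceeds Q3 + 1.5*IQR in any continuous column. A tiny self-contained illustration of the same rule on made-up numbers:

# Toy IQR upper-bound filter (illustrative values only).
import pandas as pd

s = pd.Series([120, 125, 128, 130, 132, 135, 140, 300])   # 300 is an obvious outlier
q1, q3 = s.quantile(0.25), s.quantile(0.75)
upper_limit = q3 + 1.5 * (q3 - q1)

print("upper limit:", upper_limit)
print(s[s <= upper_limit].tolist())                        # 300 is dropped, the rest are kept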
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/421/129421437.ipynb
heart-attack-analysis-prediction-dataset
rashikrahmanpritom
[{"Id": 129421437, "ScriptId": 38480393, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10194281, "CreationDate": "05/13/2023 16:38:12", "VersionNumber": 1.0, "Title": "91% accuracy Heart Attack classification", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 251.0, "LinesInsertedFromPrevious": 251.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185456923, "KernelVersionId": 129421437, "SourceDatasetVersionId": 2047221}]
[{"Id": 2047221, "DatasetId": 1226038, "DatasourceVersionId": 2087216, "CreatorUserId": 4730101, "LicenseName": "CC0: Public Domain", "CreationDate": "03/22/2021 11:40:59", "VersionNumber": 2.0, "Title": "Heart Attack Analysis & Prediction Dataset", "Slug": "heart-attack-analysis-prediction-dataset", "Subtitle": "A dataset for heart attack classification", "Description": "## Hone your analytical and ML skills by participating in tasks of my other dataset's. Given below.\n\n\n[Data Science Job Posting on Glassdoor](https://www.kaggle.com/rashikrahmanpritom/data-science-job-posting-on-glassdoor)\n\n[Groceries dataset for Market Basket Analysis(MBA)](https://www.kaggle.com/rashikrahmanpritom/groceries-dataset-for-market-basket-analysismba)\n\n[Dataset for Facial recognition using ML approach](https://www.kaggle.com/rashikrahmanpritom/dataset-for-facial-recognition-using-ml-approach)\n\n[Covid_w/wo_Pneumonia Chest Xray](https://www.kaggle.com/rashikrahmanpritom/covid-wwo-pneumonia-chest-xray)\n\n[Disney Movies 1937-2016 Gross Income](https://www.kaggle.com/rashikrahmanpritom/disney-movies-19372016-total-gross)\n\n[Bollywood Movie data from 2000 to 2019](https://www.kaggle.com/rashikrahmanpritom/bollywood-movie-data-from-2000-to-2019)\n\n[17.7K English song data from 2008-2017](https://www.kaggle.com/rashikrahmanpritom/177k-english-song-data-from-20082017)\n\n## About this dataset\n\n- Age : Age of the patient\n\n- Sex : Sex of the patient\n\n- exang: exercise induced angina (1 = yes; 0 = no)\n\n- ca: number of major vessels (0-3)\n\n- cp : Chest Pain type chest pain type\n - Value 1: typical angina\n - Value 2: atypical angina\n - Value 3: non-anginal pain\n - Value 4: asymptomatic\n \n- trtbps : resting blood pressure (in mm Hg)\n- chol : cholestoral in mg/dl fetched via BMI sensor\n- fbs : (fasting blood sugar &gt; 120 mg/dl) (1 = true; 0 = false)\n- rest_ecg : resting electrocardiographic results\n - Value 0: normal\n - Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of &gt; 0.05 mV)\n - Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria\n \n- thalach : maximum heart rate achieved\n- target : 0= less chance of heart attack 1= more chance of heart attack\n\nn", "VersionNotes": "heart csv update", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1226038, "CreatorUserId": 4730101, "OwnerUserId": 4730101.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2047221.0, "CurrentDatasourceVersionId": 2087216.0, "ForumId": 1244179, "Type": 2, "CreationDate": "03/22/2021 08:19:12", "LastActivityDate": "03/22/2021", "TotalViews": 870835, "TotalDownloads": 138216, "TotalVotes": 3197, "TotalKernels": 1050}]
[{"Id": 4730101, "UserName": "rashikrahmanpritom", "DisplayName": "Rashik Rahman", "RegisterDate": "03/24/2020", "PerformanceTier": 3}]
# ## import libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # ## Read data data = pd.read_csv("/kaggle/input/heart-attack-analysis-prediction-dataset/heart.csv") # ## define categorical columns and continuous columns cat_cols = ["sex", "exng", "caa", "cp", "fbs", "restecg", "slp", "thall"] con_cols = ["age", "trtbps", "chol", "thalachh", "oldpeak"] target_col = ["output"] print("The categorial cols are : ", cat_cols) print("The continuous cols are : ", con_cols) print("The target variable is : ", target_col) len(data.columns) data.head(30) data[con_cols].describe() data.shape # getting unique values dict = {} for i in list(data.columns): dict[i] = data[i].value_counts().shape[0] pd.DataFrame(dict, index=["unique count"]).transpose() # ## getting correlation corrmat = data.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(corrmat, vmax=0.8, square=True) # ## Creating count plot for categorical data # count plot function for getting outlier def count_plot(cols): i = 1 sns.set() plt.figure() fig, ax = plt.subplots(3, 3, figsize=(12, 9)) ax0 = plt.subplot(3, 3, 1) ax0.spines["bottom"].set_visible(False) ax0.spines["left"].set_visible(False) ax0.spines["top"].set_visible(False) ax0.spines["right"].set_visible(False) ax0.tick_params(left=False, bottom=False) ax0.set_xticklabels([]) ax0.set_yticklabels([]) ax0.text( 0.5, 0.5, f"Count plots\n_________________", horizontalalignment="center", verticalalignment="center", fontsize=18, fontweight="bold", fontfamily="serif", color="#000000", ) for feature in cols: i += 1 plt.subplot(3, 3, i) sns.countplot(x=data[feature]) plt.xlabel(feature, fontsize=9) fig.tight_layout() plt.show() count_plot(cat_cols) # ## Creating box plot for continuous data # box plot function to get outliers def box_plot(cols): i = 1 sns.set() plt.figure() fig, ax = plt.subplots(3, 2, figsize=(12, 9)) ax0 = plt.subplot(3, 2, 1) ax0.spines["bottom"].set_visible(True) ax0.spines["left"].set_visible(False) ax0.spines["top"].set_visible(False) ax0.spines["right"].set_visible(False) ax0.tick_params(left=False, bottom=False) ax0.set_xticklabels([]) ax0.set_yticklabels([]) ax0.text( 0.5, 0.5, f"box plots\n_________________", horizontalalignment="center", verticalalignment="center", fontsize=18, fontweight="bold", fontfamily="serif", color="#000000", ) for feature in cols: i += 1 plt.subplot(3, 2, i) sns.boxplot(x=data[feature]) plt.xlabel(feature, fontsize=9) fig.tight_layout() plt.show() box_plot(con_cols) # ## removing outlier # # removing outlier for col in con_cols: q1 = data[col].quantile(0.25) print(f"q1 of {col} is {q1}") q3 = data[col].quantile(0.75) print(f"q3 of {col} is {q3}") iqr = q3 - q1 print(f"iqr of {col} is {iqr}") upper_limit = q3 + (1.5 * iqr) print(f"upper limit in {col} is {upper_limit}") data = data.drop(data[data[col] > upper_limit].index) print("*" * 30) data.shape box_plot(con_cols) sns.pairplot(data, hue="output") plt.show() chance_of_heart_attack = sns.countplot(x=data["output"]).set_xticklabels( ["hight chance", "low chance"] ) # ## Importing Models # # Scaling from sklearn.preprocessing import RobustScaler # Train Test Split from sklearn.model_selection import train_test_split # Models import torch import torch.nn as nn from sklearn.svm import SVC from sklearn.pipeline import Pipeline from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from 
sklearn.ensemble import GradientBoostingClassifier # Metrics from sklearn.metrics import ( accuracy_score, classification_report, roc_curve, confusion_matrix, ) # Cross Validation from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV # ## encoding and scaling data # encoding the categorical columns data = pd.get_dummies(data, columns=cat_cols, drop_first=True) # defining the features and target X = data.drop(["output"], axis=1) y = data[["output"]] # instantiating the scaler scaler = RobustScaler() # scaling the continuous featuree X[con_cols] = scaler.fit_transform(X[con_cols]) print("The first 5 rows of X are") X.head() print(f"data shape = {data.shape}") print(f"scaled data shape = {X.shape}") X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) print("The shape of X_train is ", X_train.shape) print("The shape of X_test is ", X_test.shape) print("The shape of y_train is ", y_train.shape) print("The shape of y_test is ", y_test.shape) # ## models # support vector machines clf = SVC(kernel="linear", C=1, random_state=42) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # printing the test accuracy print( "The test accuracy score of Logistric Regression is ", accuracy_score(y_test, y_pred), ) print( "The test report of Logistric Regression is", classification_report(y_test, y_pred) ) print("the confusion matrix is : \n", confusion_matrix(y_test, y_pred)) # logistic regression model logreg = LogisticRegression() logreg.fit(X_train, y_train) y_pred_proba = logreg.predict_proba(X_test) y_pred = np.argmax(y_pred_proba, axis=1) # printing the test accuracy print( "The test accuracy score of Logistric Regression is ", accuracy_score(y_test, y_pred), ) print( "The test report of Logistric Regression is", classification_report(y_test, y_pred) ) print("the confusion matrix is : \n", confusion_matrix(y_test, y_pred)) # knn model from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier() knn.fit(X_train, y_train) pred = knn.predict(X_test) # printing the test accuracy print( "The test accuracy score of Logistric Regression is ", accuracy_score(y_test, y_pred), ) print( "The test report of Logistric Regression is", classification_report(y_test, y_pred) ) print("the confusion matrix is : \n", confusion_matrix(y_test, y_pred)) # calculating the probabilities y_pred_prob = logreg.predict_proba(X_test)[:, 1] fpr, tpr, threshols = roc_curve(y_test, y_pred_prob) # plotting the curve plt.plot([0, 1], [0, 1], "k--", "r+") plt.plot(fpr, tpr, label="Logistic Regression") plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Logistric Regression ROC Curve") plt.show() # Random forest model rf = RandomForestClassifier() rf.fit(X_train, y_train) y_pred = rf.predict(X_test) # printing the test accuracy print( "The test accuracy score of Logistric Regression is ", accuracy_score(y_test, y_pred), ) print( "The test report of Logistric Regression is", classification_report(y_test, y_pred) ) print("the confusion matrix is : \n", confusion_matrix(y_test, y_pred))
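The imports above bring in cross_val_score and GridSearchCV, but the notebook never calls them. A sketch of how they could be wired up for the random-forest model, reusing the X, y, X_train and y_train defined earlier; the parameter grid is an arbitrary illustration, not a tuned choice.

# Sketch only: k-fold cross-validation and a small grid search for the random forest.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score

rf = RandomForestClassifier(random_state=42)
cv_scores = cross_val_score(rf, X, y.values.ravel(), cv=5, scoring="accuracy")
print("5-fold CV accuracy: %.3f +/- %.3f" % (cv_scores.mean(), cv_scores.std()))

param_grid = {"n_estimators": [100, 300], "max_depth": [None, 5, 10]}  # illustrative grid
grid = GridSearchCV(RandomForestClassifier(random_state=42), param_grid, cv=5)
grid.fit(X_train, y_train.values.ravel())
print("best params:", grid.best_params_)
print("best CV accuracy: %.3f" % grid.best_score_)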
[{"heart-attack-analysis-prediction-dataset/heart.csv": {"column_names": "[\"age\", \"sex\", \"cp\", \"trtbps\", \"chol\", \"fbs\", \"restecg\", \"thalachh\", \"exng\", \"oldpeak\", \"slp\", \"caa\", \"thall\", \"output\"]", "column_data_types": "{\"age\": \"int64\", \"sex\": \"int64\", \"cp\": \"int64\", \"trtbps\": \"int64\", \"chol\": \"int64\", \"fbs\": \"int64\", \"restecg\": \"int64\", \"thalachh\": \"int64\", \"exng\": \"int64\", \"oldpeak\": \"float64\", \"slp\": \"int64\", \"caa\": \"int64\", \"thall\": \"int64\", \"output\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 303 entries, 0 to 302\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 age 303 non-null int64 \n 1 sex 303 non-null int64 \n 2 cp 303 non-null int64 \n 3 trtbps 303 non-null int64 \n 4 chol 303 non-null int64 \n 5 fbs 303 non-null int64 \n 6 restecg 303 non-null int64 \n 7 thalachh 303 non-null int64 \n 8 exng 303 non-null int64 \n 9 oldpeak 303 non-null float64\n 10 slp 303 non-null int64 \n 11 caa 303 non-null int64 \n 12 thall 303 non-null int64 \n 13 output 303 non-null int64 \ndtypes: float64(1), int64(13)\nmemory usage: 33.3 KB\n", "summary": "{\"age\": {\"count\": 303.0, \"mean\": 54.366336633663366, \"std\": 9.082100989837857, \"min\": 29.0, \"25%\": 47.5, \"50%\": 55.0, \"75%\": 61.0, \"max\": 77.0}, \"sex\": {\"count\": 303.0, \"mean\": 0.6831683168316832, \"std\": 0.46601082333962385, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"cp\": {\"count\": 303.0, \"mean\": 0.966996699669967, \"std\": 1.0320524894832985, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 3.0}, \"trtbps\": {\"count\": 303.0, \"mean\": 131.62376237623764, \"std\": 17.5381428135171, \"min\": 94.0, \"25%\": 120.0, \"50%\": 130.0, \"75%\": 140.0, \"max\": 200.0}, \"chol\": {\"count\": 303.0, \"mean\": 246.26402640264027, \"std\": 51.83075098793003, \"min\": 126.0, \"25%\": 211.0, \"50%\": 240.0, \"75%\": 274.5, \"max\": 564.0}, \"fbs\": {\"count\": 303.0, \"mean\": 0.1485148514851485, \"std\": 0.35619787492797644, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"restecg\": {\"count\": 303.0, \"mean\": 0.528052805280528, \"std\": 0.525859596359298, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 2.0}, \"thalachh\": {\"count\": 303.0, \"mean\": 149.64686468646866, \"std\": 22.905161114914094, \"min\": 71.0, \"25%\": 133.5, \"50%\": 153.0, \"75%\": 166.0, \"max\": 202.0}, \"exng\": {\"count\": 303.0, \"mean\": 0.32673267326732675, \"std\": 0.4697944645223165, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"oldpeak\": {\"count\": 303.0, \"mean\": 1.0396039603960396, \"std\": 1.1610750220686348, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.8, \"75%\": 1.6, \"max\": 6.2}, \"slp\": {\"count\": 303.0, \"mean\": 1.3993399339933994, \"std\": 0.6162261453459619, \"min\": 0.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 2.0}, \"caa\": {\"count\": 303.0, \"mean\": 0.7293729372937293, \"std\": 1.022606364969327, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 4.0}, \"thall\": {\"count\": 303.0, \"mean\": 2.3135313531353137, \"std\": 0.6122765072781409, \"min\": 0.0, \"25%\": 2.0, \"50%\": 2.0, \"75%\": 3.0, \"max\": 3.0}, \"output\": {\"count\": 303.0, \"mean\": 0.5445544554455446, \"std\": 0.4988347841643913, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": 
"{\"age\":{\"0\":63,\"1\":37,\"2\":41,\"3\":56},\"sex\":{\"0\":1,\"1\":1,\"2\":0,\"3\":1},\"cp\":{\"0\":3,\"1\":2,\"2\":1,\"3\":1},\"trtbps\":{\"0\":145,\"1\":130,\"2\":130,\"3\":120},\"chol\":{\"0\":233,\"1\":250,\"2\":204,\"3\":236},\"fbs\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"restecg\":{\"0\":0,\"1\":1,\"2\":0,\"3\":1},\"thalachh\":{\"0\":150,\"1\":187,\"2\":172,\"3\":178},\"exng\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"oldpeak\":{\"0\":2.3,\"1\":3.5,\"2\":1.4,\"3\":0.8},\"slp\":{\"0\":0,\"1\":0,\"2\":2,\"3\":2},\"caa\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"thall\":{\"0\":1,\"1\":2,\"2\":2,\"3\":2},\"output\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1}}"}}]
true
1
<start_data_description><data_path>heart-attack-analysis-prediction-dataset/heart.csv: <column_names> ['age', 'sex', 'cp', 'trtbps', 'chol', 'fbs', 'restecg', 'thalachh', 'exng', 'oldpeak', 'slp', 'caa', 'thall', 'output'] <column_types> {'age': 'int64', 'sex': 'int64', 'cp': 'int64', 'trtbps': 'int64', 'chol': 'int64', 'fbs': 'int64', 'restecg': 'int64', 'thalachh': 'int64', 'exng': 'int64', 'oldpeak': 'float64', 'slp': 'int64', 'caa': 'int64', 'thall': 'int64', 'output': 'int64'} <dataframe_Summary> {'age': {'count': 303.0, 'mean': 54.366336633663366, 'std': 9.082100989837857, 'min': 29.0, '25%': 47.5, '50%': 55.0, '75%': 61.0, 'max': 77.0}, 'sex': {'count': 303.0, 'mean': 0.6831683168316832, 'std': 0.46601082333962385, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'cp': {'count': 303.0, 'mean': 0.966996699669967, 'std': 1.0320524894832985, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 2.0, 'max': 3.0}, 'trtbps': {'count': 303.0, 'mean': 131.62376237623764, 'std': 17.5381428135171, 'min': 94.0, '25%': 120.0, '50%': 130.0, '75%': 140.0, 'max': 200.0}, 'chol': {'count': 303.0, 'mean': 246.26402640264027, 'std': 51.83075098793003, 'min': 126.0, '25%': 211.0, '50%': 240.0, '75%': 274.5, 'max': 564.0}, 'fbs': {'count': 303.0, 'mean': 0.1485148514851485, 'std': 0.35619787492797644, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'restecg': {'count': 303.0, 'mean': 0.528052805280528, 'std': 0.525859596359298, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 2.0}, 'thalachh': {'count': 303.0, 'mean': 149.64686468646866, 'std': 22.905161114914094, 'min': 71.0, '25%': 133.5, '50%': 153.0, '75%': 166.0, 'max': 202.0}, 'exng': {'count': 303.0, 'mean': 0.32673267326732675, 'std': 0.4697944645223165, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'oldpeak': {'count': 303.0, 'mean': 1.0396039603960396, 'std': 1.1610750220686348, 'min': 0.0, '25%': 0.0, '50%': 0.8, '75%': 1.6, 'max': 6.2}, 'slp': {'count': 303.0, 'mean': 1.3993399339933994, 'std': 0.6162261453459619, 'min': 0.0, '25%': 1.0, '50%': 1.0, '75%': 2.0, 'max': 2.0}, 'caa': {'count': 303.0, 'mean': 0.7293729372937293, 'std': 1.022606364969327, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 4.0}, 'thall': {'count': 303.0, 'mean': 2.3135313531353137, 'std': 0.6122765072781409, 'min': 0.0, '25%': 2.0, '50%': 2.0, '75%': 3.0, 'max': 3.0}, 'output': {'count': 303.0, 'mean': 0.5445544554455446, 'std': 0.4988347841643913, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}} <dataframe_info> RangeIndex: 303 entries, 0 to 302 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 age 303 non-null int64 1 sex 303 non-null int64 2 cp 303 non-null int64 3 trtbps 303 non-null int64 4 chol 303 non-null int64 5 fbs 303 non-null int64 6 restecg 303 non-null int64 7 thalachh 303 non-null int64 8 exng 303 non-null int64 9 oldpeak 303 non-null float64 10 slp 303 non-null int64 11 caa 303 non-null int64 12 thall 303 non-null int64 13 output 303 non-null int64 dtypes: float64(1), int64(13) memory usage: 33.3 KB <some_examples> {'age': {'0': 63, '1': 37, '2': 41, '3': 56}, 'sex': {'0': 1, '1': 1, '2': 0, '3': 1}, 'cp': {'0': 3, '1': 2, '2': 1, '3': 1}, 'trtbps': {'0': 145, '1': 130, '2': 130, '3': 120}, 'chol': {'0': 233, '1': 250, '2': 204, '3': 236}, 'fbs': {'0': 1, '1': 0, '2': 0, '3': 0}, 'restecg': {'0': 0, '1': 1, '2': 0, '3': 1}, 'thalachh': {'0': 150, '1': 187, '2': 172, '3': 178}, 'exng': {'0': 0, '1': 0, '2': 0, '3': 0}, 'oldpeak': {'0': 2.3, '1': 3.5, 
'2': 1.4, '3': 0.8}, 'slp': {'0': 0, '1': 0, '2': 2, '3': 2}, 'caa': {'0': 0, '1': 0, '2': 0, '3': 0}, 'thall': {'0': 1, '1': 2, '2': 2, '3': 2}, 'output': {'0': 1, '1': 1, '2': 1, '3': 1}} <end_description>
2,318
1
4,012
2,318
129421075
<jupyter_start><jupyter_text>Black Friday Kaggle dataset identifier: black-friday <jupyter_code>import pandas as pd df = pd.read_csv('black-friday/train.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 550068 entries, 0 to 550067 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 User_ID 550068 non-null int64 1 Product_ID 550068 non-null object 2 Gender 550068 non-null object 3 Age 550068 non-null object 4 Occupation 550068 non-null int64 5 City_Category 550068 non-null object 6 Stay_In_Current_City_Years 550068 non-null object 7 Marital_Status 550068 non-null int64 8 Product_Category_1 550068 non-null int64 9 Product_Category_2 376430 non-null float64 10 Product_Category_3 166821 non-null float64 11 Purchase 550068 non-null int64 dtypes: float64(2), int64(5), object(5) memory usage: 50.4+ MB <jupyter_text>Examples: { "User_ID": 1000001, "Product_ID": "P00069042", "Gender": "F", "Age": "0-17", "Occupation": 10, "City_Category": "A", "Stay_In_Current_City_Years": 2, "Marital_Status": 0, "Product_Category_1": 3, "Product_Category_2": NaN, "Product_Category_3": NaN, "Purchase": 8370 } { "User_ID": 1000001, "Product_ID": "P00248942", "Gender": "F", "Age": "0-17", "Occupation": 10, "City_Category": "A", "Stay_In_Current_City_Years": 2, "Marital_Status": 0, "Product_Category_1": 1, "Product_Category_2": 6.0, "Product_Category_3": 14.0, "Purchase": 15200 } { "User_ID": 1000001, "Product_ID": "P00087842", "Gender": "F", "Age": "0-17", "Occupation": 10, "City_Category": "A", "Stay_In_Current_City_Years": 2, "Marital_Status": 0, "Product_Category_1": 12, "Product_Category_2": NaN, "Product_Category_3": NaN, "Purchase": 1422 } { "User_ID": 1000001, "Product_ID": "P00085442", "Gender": "F", "Age": "0-17", "Occupation": 10, "City_Category": "A", "Stay_In_Current_City_Years": 2, "Marital_Status": 0, "Product_Category_1": 12, "Product_Category_2": 14.0, "Product_Category_3": NaN, "Purchase": 1057 } <jupyter_code>import pandas as pd df = pd.read_csv('black-friday/test.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 233599 entries, 0 to 233598 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 User_ID 233599 non-null int64 1 Product_ID 233599 non-null object 2 Gender 233599 non-null object 3 Age 233599 non-null object 4 Occupation 233599 non-null int64 5 City_Category 233599 non-null object 6 Stay_In_Current_City_Years 233599 non-null object 7 Marital_Status 233599 non-null int64 8 Product_Category_1 233599 non-null int64 9 Product_Category_2 161255 non-null float64 10 Product_Category_3 71037 non-null float64 dtypes: float64(2), int64(4), object(5) memory usage: 19.6+ MB <jupyter_text>Examples: { "User_ID": 1000004, "Product_ID": "P00128942", "Gender": "M", "Age": "46-50", "Occupation": 7, "City_Category": "B", "Stay_In_Current_City_Years": "2", "Marital_Status": 1, "Product_Category_1": 1, "Product_Category_2": 11, "Product_Category_3": NaN } { "User_ID": 1000009, "Product_ID": "P00113442", "Gender": "M", "Age": "26-35", "Occupation": 17, "City_Category": "C", "Stay_In_Current_City_Years": "0", "Marital_Status": 0, "Product_Category_1": 3, "Product_Category_2": 5, "Product_Category_3": NaN } { "User_ID": 1000010, "Product_ID": "P00288442", "Gender": "F", "Age": "36-45", "Occupation": 1, "City_Category": "B", "Stay_In_Current_City_Years": "4+", "Marital_Status": 1, "Product_Category_1": 5, "Product_Category_2": 14, "Product_Category_3": NaN } { 
"User_ID": 1000010, "Product_ID": "P00145342", "Gender": "F", "Age": "36-45", "Occupation": 1, "City_Category": "B", "Stay_In_Current_City_Years": "4+", "Marital_Status": 1, "Product_Category_1": 4, "Product_Category_2": 9, "Product_Category_3": NaN } <jupyter_script># # Black Friday Dataset EDA And Feature Engineering # ## Cleaning and preparing the data for model training import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df_train = pd.read_csv("/kaggle/input/black-friday/train.csv") df_train.head() df_test = pd.read_csv("/kaggle/input/black-friday/test.csv") df_test.head() # Merge both train and test data df = pd.concat([df_train, df_test]) df.sample(5) df.info() df.drop(["User_ID"], axis=1, inplace=True) df.head() df["Gender"] = df["Gender"].map({"F": 0, "M": 1}) df.sample(5) # Handle categorical feature Age df["Age"].unique() pd.get_dummies(df["Age"]) # * This method will not work because all the ages are similar here, but in reality, people between 18 and maybe 36 buy more, and minors below 16 or individuals older than 55 usually don't spend as much money online # df["Age"] = df["Age"].map( {"0-17": 1, "55+": 7, "26-35": 3, "46-50": 5, "51-55": 6, "36-45": 4, "18-25": 2} ) df.sample(5) # fixing categorical city_categpries df.City_Category.unique() df_city = pd.get_dummies(df["City_Category"], drop_first=True) df_city # Concate df and df_city categories and drop the City_Category column df = pd.concat([df, df_city], axis=1) df.drop("City_Category", axis=1, inplace=True) df.head() df.info() # Missing Values df.isnull().sum() # Focus on replacing missing values product cat 2 df["Product_Category_2"].value_counts() # Mode imputation: Replace the missing value with the mode (most frequently occurring value) of the variable. This approach is suitable when dealing with categorical or nominal data. It preserves the distribution of the existing data but may introduce bias if the mode is overrepresented. df["Product_Category_2"].mode()[0] # Replacing the missing values with mode df["Product_Category_2"] = df["Product_Category_2"].fillna( df["Product_Category_2"].mode()[0] ) df["Product_Category_2"].isnull().sum() # Focus on replacing missing values Product_Category_3 df["Product_Category_3"] = df["Product_Category_3"].fillna( df["Product_Category_3"].mode()[0] ) df["Product_Category_3"].isnull().sum() df.sample(5) df_train.isnull().sum() df["Stay_In_Current_City_Years"].unique() df["Stay_In_Current_City_Years"] = df["Stay_In_Current_City_Years"].str.replace("+", "") df["Stay_In_Current_City_Years"].value_counts() df["Stay_In_Current_City_Years"].dtypes # Convert object type into integer df["Stay_In_Current_City_Years"] = df["Stay_In_Current_City_Years"].astype("int64") df["Stay_In_Current_City_Years"].dtypes df.info() # Convert uint8 type into integer df["B"] = df["B"].astype("int64") df["C"] = df["C"].astype("int64") df.info() ### Visualisation np.random.seed(42) sns.barplot(data=df.sample(10000), x="Age", y="Purchase", hue="Gender") plt.show() # 0 is women and men are 1 # #### Purchasing patterns differ between men and women, with men generally having higher purchasing rates. However, there is no significant difference in purchasing behavior observed across different age groups. 
np.random.seed(42) sns.barplot(data=df.sample(10000), x="Occupation", y="Purchase", hue="Gender") # I'm taking a sample of 10k because it take a lot of time to get the output plt.show() np.random.seed(42) sns.barplot(data=df.sample(10000), x="Product_Category_1", y="Purchase", hue="Gender") plt.show() np.random.seed(42) sns.barplot(data=df.sample(10000), x="Product_Category_2", y="Purchase", hue="Gender") plt.show() np.random.seed(42) sns.barplot(data=df.sample(10000), x="Product_Category_3", y="Purchase", hue="Gender") plt.show() # Feature Scaling df_train = df[~df["Purchase"].isnull()] df_test = df[df["Purchase"].isnull()] X = df_train.drop("Purchase", axis=1) X.head() y = df_train["Purchase"] y.head() X.shape, y.shape from sklearn.model_selection import train_test_split # split into train test sets X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=42 ) X_train.drop("Product_ID", axis=1, inplace=True) X_test.drop("Product_ID", axis=1, inplace=True) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test)
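Mode imputation as applied to Product_Category_2 and Product_Category_3 above, shown on a tiny made-up frame so the effect is easy to see:

# Toy mode imputation (illustrative data, not the Black Friday file).
import numpy as np
import pandas as pd

toy = pd.DataFrame({"Product_Category_2": [6.0, np.nan, 14.0, 6.0, np.nan]})
mode_value = toy["Product_Category_2"].mode()[0]            # most frequent value -> 6.0
toy["Product_Category_2"] = toy["Product_Category_2"].fillna(mode_value)
print(toy["Product_Category_2"].tolist())                   # [6.0, 6.0, 14.0, 6.0, 6.0]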
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/421/129421075.ipynb
black-friday
sdolezel
[{"Id": 129421075, "ScriptId": 38481153, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6793743, "CreationDate": "05/13/2023 16:34:27", "VersionNumber": 2.0, "Title": "BlackFriday EDA", "EvaluationDate": "05/13/2023", "IsChange": false, "TotalLines": 152.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 152.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 185456215, "KernelVersionId": 129421075, "SourceDatasetVersionId": 14692}]
[{"Id": 14692, "DatasetId": 10479, "DatasourceVersionId": 14692, "CreatorUserId": 932915, "LicenseName": "Unknown", "CreationDate": "01/21/2018 12:36:05", "VersionNumber": 1.0, "Title": "Black Friday", "Slug": "black-friday", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 7870870.0, "TotalUncompressedBytes": 7870870.0}]
[{"Id": 10479, "CreatorUserId": 932915, "OwnerUserId": 932915.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 14692.0, "CurrentDatasourceVersionId": 14692.0, "ForumId": 17842, "Type": 2, "CreationDate": "01/21/2018 12:36:05", "LastActivityDate": "01/30/2018", "TotalViews": 125104, "TotalDownloads": 29293, "TotalVotes": 209, "TotalKernels": 113}]
[{"Id": 932915, "UserName": "sdolezel", "DisplayName": "StefanDolezel", "RegisterDate": "02/27/2017", "PerformanceTier": 0}]
# # Black Friday Dataset EDA And Feature Engineering # ## Cleaning and preparing the data for model training import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df_train = pd.read_csv("/kaggle/input/black-friday/train.csv") df_train.head() df_test = pd.read_csv("/kaggle/input/black-friday/test.csv") df_test.head() # Merge both train and test data df = pd.concat([df_train, df_test]) df.sample(5) df.info() df.drop(["User_ID"], axis=1, inplace=True) df.head() df["Gender"] = df["Gender"].map({"F": 0, "M": 1}) df.sample(5) # Handle categorical feature Age df["Age"].unique() pd.get_dummies(df["Age"]) # * This method will not work because all the ages are similar here, but in reality, people between 18 and maybe 36 buy more, and minors below 16 or individuals older than 55 usually don't spend as much money online # df["Age"] = df["Age"].map( {"0-17": 1, "55+": 7, "26-35": 3, "46-50": 5, "51-55": 6, "36-45": 4, "18-25": 2} ) df.sample(5) # fixing categorical city_categpries df.City_Category.unique() df_city = pd.get_dummies(df["City_Category"], drop_first=True) df_city # Concate df and df_city categories and drop the City_Category column df = pd.concat([df, df_city], axis=1) df.drop("City_Category", axis=1, inplace=True) df.head() df.info() # Missing Values df.isnull().sum() # Focus on replacing missing values product cat 2 df["Product_Category_2"].value_counts() # Mode imputation: Replace the missing value with the mode (most frequently occurring value) of the variable. This approach is suitable when dealing with categorical or nominal data. It preserves the distribution of the existing data but may introduce bias if the mode is overrepresented. df["Product_Category_2"].mode()[0] # Replacing the missing values with mode df["Product_Category_2"] = df["Product_Category_2"].fillna( df["Product_Category_2"].mode()[0] ) df["Product_Category_2"].isnull().sum() # Focus on replacing missing values Product_Category_3 df["Product_Category_3"] = df["Product_Category_3"].fillna( df["Product_Category_3"].mode()[0] ) df["Product_Category_3"].isnull().sum() df.sample(5) df_train.isnull().sum() df["Stay_In_Current_City_Years"].unique() df["Stay_In_Current_City_Years"] = df["Stay_In_Current_City_Years"].str.replace("+", "") df["Stay_In_Current_City_Years"].value_counts() df["Stay_In_Current_City_Years"].dtypes # Convert object type into integer df["Stay_In_Current_City_Years"] = df["Stay_In_Current_City_Years"].astype("int64") df["Stay_In_Current_City_Years"].dtypes df.info() # Convert uint8 type into integer df["B"] = df["B"].astype("int64") df["C"] = df["C"].astype("int64") df.info() ### Visualisation np.random.seed(42) sns.barplot(data=df.sample(10000), x="Age", y="Purchase", hue="Gender") plt.show() # 0 is women and men are 1 # #### Purchasing patterns differ between men and women, with men generally having higher purchasing rates. However, there is no significant difference in purchasing behavior observed across different age groups. 
np.random.seed(42) sns.barplot(data=df.sample(10000), x="Occupation", y="Purchase", hue="Gender") # I'm taking a sample of 10k because it take a lot of time to get the output plt.show() np.random.seed(42) sns.barplot(data=df.sample(10000), x="Product_Category_1", y="Purchase", hue="Gender") plt.show() np.random.seed(42) sns.barplot(data=df.sample(10000), x="Product_Category_2", y="Purchase", hue="Gender") plt.show() np.random.seed(42) sns.barplot(data=df.sample(10000), x="Product_Category_3", y="Purchase", hue="Gender") plt.show() # Feature Scaling df_train = df[~df["Purchase"].isnull()] df_test = df[df["Purchase"].isnull()] X = df_train.drop("Purchase", axis=1) X.head() y = df_train["Purchase"] y.head() X.shape, y.shape from sklearn.model_selection import train_test_split # split into train test sets X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=42 ) X_train.drop("Product_ID", axis=1, inplace=True) X_test.drop("Product_ID", axis=1, inplace=True) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test)
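The final two lines fit the StandardScaler on the training split only and apply the same statistics to the test split, which keeps test-set information out of the scaling. A toy illustration with made-up numbers:

# Toy illustration: scaler statistics come from the training split only.
import numpy as np
from sklearn.preprocessing import StandardScaler

X_tr = np.array([[1.0], [2.0], [3.0], [4.0]])
X_te = np.array([[10.0]])

sc = StandardScaler()
X_tr_scaled = sc.fit_transform(X_tr)   # mean/std estimated from X_tr only
X_te_scaled = sc.transform(X_te)       # the same mean/std applied to X_te

print(sc.mean_, sc.scale_)             # [2.5] [1.118...]
print(X_te_scaled)                     # roughly [[6.7]] - far outside the training range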
[{"black-friday/train.csv": {"column_names": "[\"User_ID\", \"Product_ID\", \"Gender\", \"Age\", \"Occupation\", \"City_Category\", \"Stay_In_Current_City_Years\", \"Marital_Status\", \"Product_Category_1\", \"Product_Category_2\", \"Product_Category_3\", \"Purchase\"]", "column_data_types": "{\"User_ID\": \"int64\", \"Product_ID\": \"object\", \"Gender\": \"object\", \"Age\": \"object\", \"Occupation\": \"int64\", \"City_Category\": \"object\", \"Stay_In_Current_City_Years\": \"object\", \"Marital_Status\": \"int64\", \"Product_Category_1\": \"int64\", \"Product_Category_2\": \"float64\", \"Product_Category_3\": \"float64\", \"Purchase\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 550068 entries, 0 to 550067\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 User_ID 550068 non-null int64 \n 1 Product_ID 550068 non-null object \n 2 Gender 550068 non-null object \n 3 Age 550068 non-null object \n 4 Occupation 550068 non-null int64 \n 5 City_Category 550068 non-null object \n 6 Stay_In_Current_City_Years 550068 non-null object \n 7 Marital_Status 550068 non-null int64 \n 8 Product_Category_1 550068 non-null int64 \n 9 Product_Category_2 376430 non-null float64\n 10 Product_Category_3 166821 non-null float64\n 11 Purchase 550068 non-null int64 \ndtypes: float64(2), int64(5), object(5)\nmemory usage: 50.4+ MB\n", "summary": "{\"User_ID\": {\"count\": 550068.0, \"mean\": 1003028.8424013031, \"std\": 1727.5915855305516, \"min\": 1000001.0, \"25%\": 1001516.0, \"50%\": 1003077.0, \"75%\": 1004478.0, \"max\": 1006040.0}, \"Occupation\": {\"count\": 550068.0, \"mean\": 8.076706879876669, \"std\": 6.522660487341824, \"min\": 0.0, \"25%\": 2.0, \"50%\": 7.0, \"75%\": 14.0, \"max\": 20.0}, \"Marital_Status\": {\"count\": 550068.0, \"mean\": 0.40965298835780306, \"std\": 0.49177012631733, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"Product_Category_1\": {\"count\": 550068.0, \"mean\": 5.404270017525106, \"std\": 3.936211369201389, \"min\": 1.0, \"25%\": 1.0, \"50%\": 5.0, \"75%\": 8.0, \"max\": 20.0}, \"Product_Category_2\": {\"count\": 376430.0, \"mean\": 9.842329251122386, \"std\": 5.086589648693479, \"min\": 2.0, \"25%\": 5.0, \"50%\": 9.0, \"75%\": 15.0, \"max\": 18.0}, \"Product_Category_3\": {\"count\": 166821.0, \"mean\": 12.668243206790512, \"std\": 4.125337631575282, \"min\": 3.0, \"25%\": 9.0, \"50%\": 14.0, \"75%\": 16.0, \"max\": 18.0}, \"Purchase\": {\"count\": 550068.0, \"mean\": 9263.968712959126, \"std\": 5023.065393820582, \"min\": 12.0, \"25%\": 5823.0, \"50%\": 8047.0, \"75%\": 12054.0, \"max\": 23961.0}}", "examples": "{\"User_ID\":{\"0\":1000001,\"1\":1000001,\"2\":1000001,\"3\":1000001},\"Product_ID\":{\"0\":\"P00069042\",\"1\":\"P00248942\",\"2\":\"P00087842\",\"3\":\"P00085442\"},\"Gender\":{\"0\":\"F\",\"1\":\"F\",\"2\":\"F\",\"3\":\"F\"},\"Age\":{\"0\":\"0-17\",\"1\":\"0-17\",\"2\":\"0-17\",\"3\":\"0-17\"},\"Occupation\":{\"0\":10,\"1\":10,\"2\":10,\"3\":10},\"City_Category\":{\"0\":\"A\",\"1\":\"A\",\"2\":\"A\",\"3\":\"A\"},\"Stay_In_Current_City_Years\":{\"0\":\"2\",\"1\":\"2\",\"2\":\"2\",\"3\":\"2\"},\"Marital_Status\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"Product_Category_1\":{\"0\":3,\"1\":1,\"2\":12,\"3\":12},\"Product_Category_2\":{\"0\":null,\"1\":6.0,\"2\":null,\"3\":14.0},\"Product_Category_3\":{\"0\":null,\"1\":14.0,\"2\":null,\"3\":null},\"Purchase\":{\"0\":8370,\"1\":15200,\"2\":1422,\"3\":1057}}"}}, {"black-friday/test.csv": {"column_names": 
"[\"User_ID\", \"Product_ID\", \"Gender\", \"Age\", \"Occupation\", \"City_Category\", \"Stay_In_Current_City_Years\", \"Marital_Status\", \"Product_Category_1\", \"Product_Category_2\", \"Product_Category_3\"]", "column_data_types": "{\"User_ID\": \"int64\", \"Product_ID\": \"object\", \"Gender\": \"object\", \"Age\": \"object\", \"Occupation\": \"int64\", \"City_Category\": \"object\", \"Stay_In_Current_City_Years\": \"object\", \"Marital_Status\": \"int64\", \"Product_Category_1\": \"int64\", \"Product_Category_2\": \"float64\", \"Product_Category_3\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 233599 entries, 0 to 233598\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 User_ID 233599 non-null int64 \n 1 Product_ID 233599 non-null object \n 2 Gender 233599 non-null object \n 3 Age 233599 non-null object \n 4 Occupation 233599 non-null int64 \n 5 City_Category 233599 non-null object \n 6 Stay_In_Current_City_Years 233599 non-null object \n 7 Marital_Status 233599 non-null int64 \n 8 Product_Category_1 233599 non-null int64 \n 9 Product_Category_2 161255 non-null float64\n 10 Product_Category_3 71037 non-null float64\ndtypes: float64(2), int64(4), object(5)\nmemory usage: 19.6+ MB\n", "summary": "{\"User_ID\": {\"count\": 233599.0, \"mean\": 1003029.3568594044, \"std\": 1726.5049679955312, \"min\": 1000001.0, \"25%\": 1001527.0, \"50%\": 1003070.0, \"75%\": 1004477.0, \"max\": 1006040.0}, \"Occupation\": {\"count\": 233599.0, \"mean\": 8.085407043694536, \"std\": 6.521146481494521, \"min\": 0.0, \"25%\": 2.0, \"50%\": 7.0, \"75%\": 14.0, \"max\": 20.0}, \"Marital_Status\": {\"count\": 233599.0, \"mean\": 0.4100702485883929, \"std\": 0.49184720737729476, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"Product_Category_1\": {\"count\": 233599.0, \"mean\": 5.276542279718663, \"std\": 3.7363801122656355, \"min\": 1.0, \"25%\": 1.0, \"50%\": 5.0, \"75%\": 8.0, \"max\": 18.0}, \"Product_Category_2\": {\"count\": 161255.0, \"mean\": 9.849586059346997, \"std\": 5.094942849775034, \"min\": 2.0, \"25%\": 5.0, \"50%\": 9.0, \"75%\": 15.0, \"max\": 18.0}, \"Product_Category_3\": {\"count\": 71037.0, \"mean\": 12.669453946534905, \"std\": 4.125944373515683, \"min\": 3.0, \"25%\": 9.0, \"50%\": 14.0, \"75%\": 16.0, \"max\": 18.0}}", "examples": "{\"User_ID\":{\"0\":1000004,\"1\":1000009,\"2\":1000010,\"3\":1000010},\"Product_ID\":{\"0\":\"P00128942\",\"1\":\"P00113442\",\"2\":\"P00288442\",\"3\":\"P00145342\"},\"Gender\":{\"0\":\"M\",\"1\":\"M\",\"2\":\"F\",\"3\":\"F\"},\"Age\":{\"0\":\"46-50\",\"1\":\"26-35\",\"2\":\"36-45\",\"3\":\"36-45\"},\"Occupation\":{\"0\":7,\"1\":17,\"2\":1,\"3\":1},\"City_Category\":{\"0\":\"B\",\"1\":\"C\",\"2\":\"B\",\"3\":\"B\"},\"Stay_In_Current_City_Years\":{\"0\":\"2\",\"1\":\"0\",\"2\":\"4+\",\"3\":\"4+\"},\"Marital_Status\":{\"0\":1,\"1\":0,\"2\":1,\"3\":1},\"Product_Category_1\":{\"0\":1,\"1\":3,\"2\":5,\"3\":4},\"Product_Category_2\":{\"0\":11.0,\"1\":5.0,\"2\":14.0,\"3\":9.0},\"Product_Category_3\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null}}"}}]
true
2
<start_data_description><data_path>black-friday/train.csv: <column_names> ['User_ID', 'Product_ID', 'Gender', 'Age', 'Occupation', 'City_Category', 'Stay_In_Current_City_Years', 'Marital_Status', 'Product_Category_1', 'Product_Category_2', 'Product_Category_3', 'Purchase'] <column_types> {'User_ID': 'int64', 'Product_ID': 'object', 'Gender': 'object', 'Age': 'object', 'Occupation': 'int64', 'City_Category': 'object', 'Stay_In_Current_City_Years': 'object', 'Marital_Status': 'int64', 'Product_Category_1': 'int64', 'Product_Category_2': 'float64', 'Product_Category_3': 'float64', 'Purchase': 'int64'} <dataframe_Summary> {'User_ID': {'count': 550068.0, 'mean': 1003028.8424013031, 'std': 1727.5915855305516, 'min': 1000001.0, '25%': 1001516.0, '50%': 1003077.0, '75%': 1004478.0, 'max': 1006040.0}, 'Occupation': {'count': 550068.0, 'mean': 8.076706879876669, 'std': 6.522660487341824, 'min': 0.0, '25%': 2.0, '50%': 7.0, '75%': 14.0, 'max': 20.0}, 'Marital_Status': {'count': 550068.0, 'mean': 0.40965298835780306, 'std': 0.49177012631733, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'Product_Category_1': {'count': 550068.0, 'mean': 5.404270017525106, 'std': 3.936211369201389, 'min': 1.0, '25%': 1.0, '50%': 5.0, '75%': 8.0, 'max': 20.0}, 'Product_Category_2': {'count': 376430.0, 'mean': 9.842329251122386, 'std': 5.086589648693479, 'min': 2.0, '25%': 5.0, '50%': 9.0, '75%': 15.0, 'max': 18.0}, 'Product_Category_3': {'count': 166821.0, 'mean': 12.668243206790512, 'std': 4.125337631575282, 'min': 3.0, '25%': 9.0, '50%': 14.0, '75%': 16.0, 'max': 18.0}, 'Purchase': {'count': 550068.0, 'mean': 9263.968712959126, 'std': 5023.065393820582, 'min': 12.0, '25%': 5823.0, '50%': 8047.0, '75%': 12054.0, 'max': 23961.0}} <dataframe_info> RangeIndex: 550068 entries, 0 to 550067 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 User_ID 550068 non-null int64 1 Product_ID 550068 non-null object 2 Gender 550068 non-null object 3 Age 550068 non-null object 4 Occupation 550068 non-null int64 5 City_Category 550068 non-null object 6 Stay_In_Current_City_Years 550068 non-null object 7 Marital_Status 550068 non-null int64 8 Product_Category_1 550068 non-null int64 9 Product_Category_2 376430 non-null float64 10 Product_Category_3 166821 non-null float64 11 Purchase 550068 non-null int64 dtypes: float64(2), int64(5), object(5) memory usage: 50.4+ MB <some_examples> {'User_ID': {'0': 1000001, '1': 1000001, '2': 1000001, '3': 1000001}, 'Product_ID': {'0': 'P00069042', '1': 'P00248942', '2': 'P00087842', '3': 'P00085442'}, 'Gender': {'0': 'F', '1': 'F', '2': 'F', '3': 'F'}, 'Age': {'0': '0-17', '1': '0-17', '2': '0-17', '3': '0-17'}, 'Occupation': {'0': 10, '1': 10, '2': 10, '3': 10}, 'City_Category': {'0': 'A', '1': 'A', '2': 'A', '3': 'A'}, 'Stay_In_Current_City_Years': {'0': '2', '1': '2', '2': '2', '3': '2'}, 'Marital_Status': {'0': 0, '1': 0, '2': 0, '3': 0}, 'Product_Category_1': {'0': 3, '1': 1, '2': 12, '3': 12}, 'Product_Category_2': {'0': None, '1': 6.0, '2': None, '3': 14.0}, 'Product_Category_3': {'0': None, '1': 14.0, '2': None, '3': None}, 'Purchase': {'0': 8370, '1': 15200, '2': 1422, '3': 1057}} <end_description> <start_data_description><data_path>black-friday/test.csv: <column_names> ['User_ID', 'Product_ID', 'Gender', 'Age', 'Occupation', 'City_Category', 'Stay_In_Current_City_Years', 'Marital_Status', 'Product_Category_1', 'Product_Category_2', 'Product_Category_3'] <column_types> {'User_ID': 'int64', 'Product_ID': 'object', 'Gender': 'object', 
'Age': 'object', 'Occupation': 'int64', 'City_Category': 'object', 'Stay_In_Current_City_Years': 'object', 'Marital_Status': 'int64', 'Product_Category_1': 'int64', 'Product_Category_2': 'float64', 'Product_Category_3': 'float64'} <dataframe_Summary> {'User_ID': {'count': 233599.0, 'mean': 1003029.3568594044, 'std': 1726.5049679955312, 'min': 1000001.0, '25%': 1001527.0, '50%': 1003070.0, '75%': 1004477.0, 'max': 1006040.0}, 'Occupation': {'count': 233599.0, 'mean': 8.085407043694536, 'std': 6.521146481494521, 'min': 0.0, '25%': 2.0, '50%': 7.0, '75%': 14.0, 'max': 20.0}, 'Marital_Status': {'count': 233599.0, 'mean': 0.4100702485883929, 'std': 0.49184720737729476, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'Product_Category_1': {'count': 233599.0, 'mean': 5.276542279718663, 'std': 3.7363801122656355, 'min': 1.0, '25%': 1.0, '50%': 5.0, '75%': 8.0, 'max': 18.0}, 'Product_Category_2': {'count': 161255.0, 'mean': 9.849586059346997, 'std': 5.094942849775034, 'min': 2.0, '25%': 5.0, '50%': 9.0, '75%': 15.0, 'max': 18.0}, 'Product_Category_3': {'count': 71037.0, 'mean': 12.669453946534905, 'std': 4.125944373515683, 'min': 3.0, '25%': 9.0, '50%': 14.0, '75%': 16.0, 'max': 18.0}} <dataframe_info> RangeIndex: 233599 entries, 0 to 233598 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 User_ID 233599 non-null int64 1 Product_ID 233599 non-null object 2 Gender 233599 non-null object 3 Age 233599 non-null object 4 Occupation 233599 non-null int64 5 City_Category 233599 non-null object 6 Stay_In_Current_City_Years 233599 non-null object 7 Marital_Status 233599 non-null int64 8 Product_Category_1 233599 non-null int64 9 Product_Category_2 161255 non-null float64 10 Product_Category_3 71037 non-null float64 dtypes: float64(2), int64(4), object(5) memory usage: 19.6+ MB <some_examples> {'User_ID': {'0': 1000004, '1': 1000009, '2': 1000010, '3': 1000010}, 'Product_ID': {'0': 'P00128942', '1': 'P00113442', '2': 'P00288442', '3': 'P00145342'}, 'Gender': {'0': 'M', '1': 'M', '2': 'F', '3': 'F'}, 'Age': {'0': '46-50', '1': '26-35', '2': '36-45', '3': '36-45'}, 'Occupation': {'0': 7, '1': 17, '2': 1, '3': 1}, 'City_Category': {'0': 'B', '1': 'C', '2': 'B', '3': 'B'}, 'Stay_In_Current_City_Years': {'0': '2', '1': '0', '2': '4+', '3': '4+'}, 'Marital_Status': {'0': 1, '1': 0, '2': 1, '3': 1}, 'Product_Category_1': {'0': 1, '1': 3, '2': 5, '3': 4}, 'Product_Category_2': {'0': 11.0, '1': 5.0, '2': 14.0, '3': 9.0}, 'Product_Category_3': {'0': None, '1': None, '2': None, '3': None}} <end_description>
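The two description blocks above list per-column dtypes, non-null counts, and summary statistics for the Black Friday train and test files. A minimal sketch, assuming the black-friday/train.csv and black-friday/test.csv paths named in the description (adjust to the actual input directory), that reproduces those checks with pandas:

import pandas as pd

train = pd.read_csv("black-friday/train.csv")
test = pd.read_csv("black-friday/test.csv")

for name, frame in [("train", train), ("test", test)]:
    print(f"--- {name} ---")
    print(frame.dtypes)            # column data types, as in the description
    print(frame.isnull().sum())    # missing counts (Product_Category_2/3 are the sparse ones)
    print(frame.describe())        # count/mean/std/quantiles for the numeric columns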
1,449
1
3,269
1,449
129421175
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ## **Goal of the Competition** # The goal of this competition is to predict if a person has any of three medical conditions. You will create a model trained on measurements of health characteristics. # To determine if someone has these medical conditions requires a long and intrusive process to collect information from patients. With predictive models, we can shorten this process and keep patient details private by collecting key characteristics relative to the conditions, then encoding these characteristics. # Your work will help researchers discover the relationship between measurements of certain characteristics and potential patient conditions. # **I will continue to work and update this notebook. Please upvote it if you find it useful in this interesting challenge!** # ## **Dataset Description** # The competition data comprises over fifty anonymized health characteristics linked to three age-related conditions. Your goal is to predict whether a subject has or has not been diagnosed with one of these conditions -- a binary classification problem. # **train.csv - The training set.** # * Id: Unique identifier for each observation. # * AB-GL: Fifty-six anonymized health characteristics. All are numeric except for EJ, which is categorical. # * Class A: binary target: 1 indicates the subject has been diagnosed with one of the three conditions, 0 indicates they have not. train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") train # Loop over each float64 column for i, column_name in enumerate(train.select_dtypes(include="float64").columns): # Create a figure and axes fig, ax = plt.subplots() # Plot the distribution using a histogram ax.hist(train[column_name], bins=30, color="steelblue", alpha=0.7) # Add a vertical line for the mean ax.axvline( train[column_name].mean(), color="black", linestyle="dashed", linewidth=2, label=f"Mean = {train[column_name].mean():.2f}", ) # Add a vertical line for the median ax.axvline( train[column_name].median(), color="red", linestyle="dashed", linewidth=2, label=f"Median = {train[column_name].median():.2f}", ) # Set the subplot title and axis labels ax.set_title(f"{column_name} Distribution with Mean and Median") ax.set_xlabel(column_name) ax.set_ylabel("Count") # Add a legend ax.legend() # Set the main title for all subplots plt.suptitle("Distributions of Numerical Anonymized Health Characteristic Columns") # Show the plot plt.show() # Pearson correlation is measure of the strength of the linear relationship between two continuous variables. It measures the degree to which the two variables are related to each other and ranges between -1 and 1. 
The correlations between the numerical features in the train dataset range from roughly -0.3 to 0.3 # Compute the correlation matrix corr = train.corr() # Generate a mask for the upper triangle mask = np.triu(np.ones_like(corr, dtype=bool)) # Set up the matplotlib figure f, ax = plt.subplots(figsize=(11, 9)) # Generate a custom diverging colormap cmap = sns.diverging_palette(230, 20, as_cmap=True) # Draw the heatmap with the mask and correct aspect ratio sns.heatmap( corr, mask=mask, cmap=cmap, vmax=0.3, center=0, square=True, linewidths=0.5, cbar_kws={"shrink": 0.5}, )
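# To back the claim that the pairwise correlations stay within roughly -0.3 to 0.3, the matrix
# can also be inspected numerically. A minimal sketch, assuming the `train` frame and the imports
# from the cells above, that lists the strongest absolute correlations between the numeric columns:
# restrict to numeric columns so the result matches the heatmap
num_corr = train.select_dtypes(include="number").corr()
# keep only the upper triangle (each pair once, diagonal excluded)
upper = np.triu(np.ones(num_corr.shape, dtype=bool), k=1)
pairs = num_corr.where(upper).stack()
# rank the pairs by absolute correlation and show the ten strongest
print(pairs.reindex(pairs.abs().sort_values(ascending=False).index).head(10))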
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/421/129421175.ipynb
null
null
[{"Id": 129421175, "ScriptId": 38419017, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4206209, "CreationDate": "05/13/2023 16:35:31", "VersionNumber": 1.0, "Title": "ICR - Exploratory Data & Statistical Analysis", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 101.0, "LinesInsertedFromPrevious": 101.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
false
0
1,056
1
1,056
1,056
129421344
<jupyter_start><jupyter_text>photo_1 Kaggle dataset identifier: photo-1 <jupyter_script>import cv2 import pytesseract # Read image using OpenCV image = cv2.imread( "/kaggle/input/single-image/purepng.com-flower-vectorflower-clipart-vector-floral-961524679749nrsh7.png" ) # Preprocess image using OpenCV gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) gray = cv2.medianBlur(gray, 3) gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1] # Perform OCR on the preprocessed image using PyTesseract text = pytesseract.image_to_string(gray, config="--psm 11") # Extract float numbers from the text using regular expressions import re floats = re.findall(r"\d+\.\d+", text) print(floats) import cv2 import pytesseract import re def extract_float_numbers(image_path): # Load the image image = cv2.imread(image_path) # Convert the image to grayscale gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # Apply thresholding to convert the image to black and white thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1] # Perform OCR on the image using Tesseract text = pytesseract.image_to_string(thresh, config="--psm 6") # Extract all the float numbers using regular expressions float_numbers = re.findall(r"\d+\.\d+", text) return float_numbers print( extract_float_numbers( "/kaggle/input/single-image/purepng.com-flower-vectorflower-clipart-vector-floral-961524679749nrsh7.png" ) )
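# The extraction above ultimately depends on the `\d+\.\d+` pattern, which can be sanity-checked
# without running OCR at all. A minimal sketch on a made-up string (the sample text is purely
# illustrative, not taken from the image used in this notebook):
import re

sample = "Total: 12.50 USD, qty 3, discount 0.125, ref 7"
print(re.findall(r"\d+\.\d+", sample))       # ['12.50', '0.125'] -- decimal numbers only
print(re.findall(r"\d+(?:\.\d+)?", sample))  # also picks up the bare integers 3 and 7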
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/421/129421344.ipynb
photo-1
husseinshafeek
[{"Id": 129421344, "ScriptId": 38476325, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8058159, "CreationDate": "05/13/2023 16:37:09", "VersionNumber": 1.0, "Title": "compute_float_numbers", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 39.0, "LinesInsertedFromPrevious": 39.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185456742, "KernelVersionId": 129421344, "SourceDatasetVersionId": 5677634}, {"Id": 185456741, "KernelVersionId": 129421344, "SourceDatasetVersionId": 5676853}]
[{"Id": 5677634, "DatasetId": 3263924, "DatasourceVersionId": 5753187, "CreatorUserId": 8058159, "LicenseName": "Unknown", "CreationDate": "05/13/2023 16:20:53", "VersionNumber": 1.0, "Title": "photo_1", "Slug": "photo-1", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3263924, "CreatorUserId": 8058159, "OwnerUserId": 8058159.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5677634.0, "CurrentDatasourceVersionId": 5753187.0, "ForumId": 3329527, "Type": 2, "CreationDate": "05/13/2023 16:20:53", "LastActivityDate": "05/13/2023", "TotalViews": 31, "TotalDownloads": 2, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 8058159, "UserName": "husseinshafeek", "DisplayName": "hussein shafeek", "RegisterDate": "08/03/2021", "PerformanceTier": 0}]
false
0
475
0
495
475
129421220
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session data = pd.read_excel("/kaggle/input/forecasting-orf/dataset.xlsx") data = data.drop( [ "SAND_NAME", "ASSESSED", "SDDATE", "SDYEAR", "SDDATEH", "SDYEARH", "WELLAPI", "BOEM_FIELD", "FDDATE", "FDYEAR", "EIAID", "SAND", "PLAY_NUM", "PLAY_NAME", "POOL_NAME", "CHRONOZONE", "PLAY_TYPE", "SS", "THK", "TAREA", "TVOL", "OTHK", "OAREA", "OVOL", "GTHK", "GAREA", "GVOL", "DRIVE", "YIELD", "GOR", "SPGR", "BGI", "BOI", "GRF", "GRECO", "GRECG", "GRP", "NCNT", "UCNT", "BHCOMP", ], axis=1, ) data = data.drop(data[data["ORF"] > 1].index) data = data.drop( [ "FSTRUC", "FTRAP1", "FTRAP2", "PLAREA", "WDEP", "Original Gas", "Cum Gas", "Gas Reserves", "BOE Reserves", "SDPG", "SDTG", "RSI", "GIP", "ORECG", "ORP", "TCNT", "SN_FORMSAND", "Original BOE", "Cum BOE", "PROP", "FCLASS", "RECG_AF", ], axis=1, ) data = data.drop(data[data["SD_TYPE"] == "W"].index) data["FSTAT"] = data["FSTAT"].replace({"E": 0, "A": 1}) data["RESTYP"] = data["RESTYP"].replace({"N": 0, "U": 1, "S": 2}) data["SD_TYPE"] = data["SD_TYPE"].replace({"G": 0, "B": 1, "O": 2}) data = data.drop(data[data["ORF"] == 0].index) # data['PERMEABILITY'] = data['PERMEABILITY'].apply(np.log) data = data.dropna().reset_index() data.head() from tensorflow import keras from tensorflow.keras import layers import matplotlib.pyplot as plt from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error from sklearn import linear_model import seaborn as sns from sklearn.linear_model import Ridge from sklearn.linear_model import RidgeCV from sklearn.model_selection import RepeatedKFold from sklearn.model_selection import train_test_split from sklearn.linear_model import Lasso from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import GradientBoostingRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import accuracy_score from keras.optimizers import Adam from tensorflow.keras.optimizers import SGD from tensorflow.keras.callbacks import EarlyStopping from sklearn.svm import SVR from keras.activations import sigmoid def clean_dataset(df): assert isinstance(df, pd.DataFrame), "df needs to be a pd.DataFrame" df.dropna(inplace=True) indices_to_keep = ~df.isin([np.nan, np.inf, -np.inf]).any(axis=1) return df[indices_to_keep].astype(np.float64) data = clean_dataset(data) features = ["POROSITY", "PERMEABILITY", "SW"] target = "ORF" scaler = MinMaxScaler() X = data[features].values X = scaler.fit_transform(X) y = data[target].values X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) model_linear = linear_model.LinearRegression(positive=True) model_linear.fit(X_train, y_train) y_pred = model_linear.predict(X_test) mse = mean_squared_error(y_test, y_pred) 
mae = mean_absolute_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) print("MSE: {}".format(mse)) print("MAE: {}".format(mae)) print("R^2: {}".format(r2)) plt.scatter(y_test, y_pred, color="blue") plt.show() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) alphas = [0.1, 0.5, 1] model_ridgecv = RidgeCV(alphas=alphas) model_ridgecv.fit(X_train, y_train) y_pred = model_ridgecv.predict(X_test) mse = mean_squared_error(y_test, y_pred) mae = mean_absolute_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) print("MSE: {}".format(mse)) print("MAE: {}".format(mae)) print("R^2: {}".format(r2)) plt.scatter(X_train[:, 1], y_train, color="blue", label="Train Data") plt.scatter(X_test[:, 1], y_pred, color="red", label="Test Data") plt.show() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) model_lasso = Lasso(alpha=1.0) model_lasso.fit(X_train, y_train) y_pred = model_lasso.predict(X_test) print("Коэффициенты регрессии:") print(model_lasso.coef_) mse = mean_squared_error(y_test, y_pred) mae = mean_absolute_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) print("MSE: {}".format(mse)) print("MAE: {}".format(mae)) print("R^2: {}".format(r2)) plt.scatter(X_train[:, 1], y_train, color="blue", label="Train Data") plt.scatter(X_test[:, 1], y_pred, color="red", label="Test Data") plt.show() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) model_dtr = DecisionTreeRegressor(splitter="best", random_state=5) model_dtr.fit(X_train, y_train) y_pred = model_dtr.predict(X_test) mse = mean_squared_error(y_test, y_pred) mae = mean_absolute_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) print("MSE: {}".format(mse)) print("MAE: {}".format(mae)) print("R^2: {}".format(r2)) plt.scatter(X_train[:, 0], y_train, color="blue", label="Train Data") plt.scatter(X_test[:, 0], y_pred, color="red", label="Test Data") plt.show() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) model_gbr = GradientBoostingRegressor(n_estimators=100, learning_rate=0.01) model_gbr.fit(X_train, y_train) y_pred = model_gbr.predict(X_test) mse = mean_squared_error(y_test, y_pred) mae = mean_absolute_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) print("MSE: {}".format(mse)) print("MAE: {}".format(mae)) print("R^2: {}".format(r2)) plt.scatter(X_train[:, 0], y_train, color="blue", label="Train Data") plt.scatter(X_test[:, 0], y_pred, color="red", label="Test Data") plt.show() plt.scatter(X_train[:, 1], y_train, color="blue", label="Train Data") plt.scatter(X_test[:, 1], y_pred, color="red", label="Test Data") plt.show() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) model_rfr = RandomForestRegressor(n_estimators=100) model_rfr.fit(X_train, y_train) y_pred = model_rfr.predict(X_test) mse = mean_squared_error(y_test, y_pred) mae = mean_absolute_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) print("MSE: {}".format(mse)) print("MAE: {}".format(mae)) print("R^2: {}".format(r2)) plt.scatter(y_test, y_pred) plt.show() plt.scatter(X_train[:, 0], y_train, color="blue", label="Train Data") plt.scatter(X_test[:, 0], y_pred, color="red", label="Test Data") plt.show() plt.scatter(X_train[:, 1], y_train, color="blue", label="Train Data") plt.scatter(X_test[:, 1], y_pred, color="red", label="Test Data") plt.show() X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) model = keras.Sequential() 
model.add(layers.Dense(25, activation="relu", input_shape=(3,))) model.add(layers.Dense(5, activation=sigmoid)) model.add(layers.Dense(1)) model.compile(optimizer="adam", loss="mean_squared_error") # early_stopping = EarlyStopping(monitor='val_loss', patience=10) model.fit( X_train, y_train, epochs=100, batch_size=32, verbose=1, validation_data=(X_test, y_test), ) loss = model.evaluate(X_test, y_test) y_pred = model.predict(X_test) plt.scatter(X_train[:, 0], y_train, color="blue", label="Train Data") plt.scatter(X_test[:, 0], y_pred, color="red", label="Test Data") plt.show() plt.scatter(X_train[:, 1], y_train, color="blue", label="Train Data") plt.scatter(X_test[:, 1], y_pred, color="red", label="Test Data") plt.show() mse = mean_squared_error(y_test, y_pred) mae = mean_absolute_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) print("MSE: {}".format(mse)) print("MAE: {}".format(mae)) print("R^2: {}".format(r2)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) model_svr = SVR(kernel="rbf", C=1.0, epsilon=0.1) model_svr.fit(X_train, y_train) y_pred = model_svr.predict(X_test) mse = mean_squared_error(y_test, y_pred) mae = mean_absolute_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) print("MSE: {}".format(mse)) print("MAE: {}".format(mae)) print("R^2: {}".format(r2)) plt.scatter(X_train[:, 0], y_train, color="blue", label="Train Data") plt.scatter(X_test[:, 0], y_pred, color="red", label="Test Data") plt.show() plt.scatter(X_train[:, 1], y_train, color="blue", label="Train Data") plt.scatter(X_test[:, 1], y_pred, color="red", label="Test Data") plt.show() new_data = [[0.2, 0.3, 0.3]] print("linear: {}".format(model_linear.predict(new_data))) print("ridgecv: {}".format(model_ridgecv.predict(new_data))) print("lass: {}".format(model_lasso.predict(new_data))) print("desicion tree: {}".format(model_dtr.predict(new_data))) print("gradient boosting: {}".format(model_gbr.predict(new_data))) print("rfr: {}".format(model_rfr.predict(new_data))) print("AI: {}".format(model.predict(new_data))) print("SVR: {}".format(model_svr.predict(new_data)))
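# The MSE/MAE/R^2 block above is repeated verbatim after every model. As a sketch of a possible
# refactor (not part of the original notebook), the already-fitted estimators can be scored by a
# single helper; the Keras model returns a 2-D array, so predictions are flattened before scoring.
def report(name, fitted_model, X_eval, y_eval):
    # one line of metrics per model, using the same sklearn metrics imported above
    preds = np.ravel(fitted_model.predict(X_eval))
    print(
        "{}: MSE={:.4f} MAE={:.4f} R^2={:.4f}".format(
            name,
            mean_squared_error(y_eval, preds),
            mean_absolute_error(y_eval, preds),
            r2_score(y_eval, preds),
        )
    )

for name, fitted in [
    ("linear", model_linear),
    ("ridgecv", model_ridgecv),
    ("lasso", model_lasso),
    ("decision tree", model_dtr),
    ("gradient boosting", model_gbr),
    ("random forest", model_rfr),
    ("neural net", model),
    ("svr", model_svr),
]:
    report(name, fitted, X_test, y_test)
# Note that the notebook re-splits the data with different random_state values for different
# models, so a single shared split would be needed for a strictly like-for-like comparison; and
# if the new_data values at the end are meant to be raw POROSITY/PERMEABILITY/SW readings, they
# would need scaler.transform(new_data) before being passed to the models.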
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/421/129421220.ipynb
null
null
[{"Id": 129421220, "ScriptId": 38385257, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12918394, "CreationDate": "05/13/2023 16:35:56", "VersionNumber": 3.0, "Title": "ORF forecast", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 248.0, "LinesInsertedFromPrevious": 166.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 82.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
3,576
0
3,576
3,576
129088507
<jupyter_start><jupyter_text>pens and printers sales data Kaggle dataset identifier: pens-and-printers-sales-data <jupyter_script># # Business Problem Definition # The business problem at hand is to **determine the most effective sales strategy for Pens and Printers' new line of office stationery**. The company has tested three different sales strategies, namely email, phone calls, and a combination of email and phone calls, to sell the new product line. The sales team needs to understand which approach worked the best and if there were any differences in revenue over time for each of the methods. Additionally, the team wants to identify if there are other differences between the customers in each group that could provide context for what went well. The goal is to use the findings to inform the decision of which sales strategy to continue using to sell the new product line. # # Data validation # This step starts with performing data preparation tasks including removing duplicates, dealing with missing values, and transforming variables. The dataset contains **15,000 rows and 8 columns** before cleaning and validation. I have validated all the columns against the criteria in the dataset table: # **Here's a breakdown of each column's validation and cleaning steps:** # - "Week": The "Week" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 6 # - "Sales Method": The "Sales Method" column is validated by checking the unique values and the number of missing values. No cleaning is performed on this column. Unique values: ['Email + Call' 'Call' 'Email' 'em + call' 'email' ] # - "Customer ID": The "Customer ID" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 15000 # - "Number of Products Sold": The "Number of Products Sold" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. # - "Revenue": The "Revenue" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 6743 # - "Years as Customer": The "Years as Customer" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 42 # - "Number of Site Visits": The "Number of Site Visits" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 26 # - "State": The "State" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 50 # - Data types of all columns are validated. # **After validating each column, two cleaning steps are performed on the entire dataset:** # - Replacing missing values with the median of the revenue column: Missing values in the revenue column are replaced with its median using the fillna() method. We replaced 1074 missing values in the revenue column. # - Remove duplicates: Duplicate rows are removed from the dataset using the drop_duplicates() method. We didn't find any duplicates in the dataset.
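# The column-by-column breakdown above can also be produced programmatically instead of being
# checked by hand. A minimal sketch, assuming the sales_data.csv file and the cleaned field names
# used in the code further below (week, sales_method, customer_id, nb_sold, revenue,
# years_as_customer, nb_site_visits, state):
import pandas as pd

raw = pd.read_csv("sales_data.csv")

# one row per column: dtype, number of distinct values, and missing count,
# mirroring the validation bullets above
validation_summary = pd.DataFrame(
    {
        "dtype": raw.dtypes.astype(str),
        "unique_values": raw.nunique(),
        "missing_values": raw.isnull().sum(),
    }
)
print(validation_summary)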
# **After two cleaning steps, we transform the sales_method column:** # We use the replace function to replace all occurrences of 'em + call' with 'Email + Call' and 'email' with 'Email' in the sales_method column. So, we have only three columns representing the sales method 'Email + Call', 'Call', 'Email' # After the data validation, the dataset contains **15000 rows and 8 columns**. # import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # Load the data data = pd.read_csv("sales_data.csv") # Data validation and cleaning # Validate each column print("Week") print(" - Unique values: ", data["week"].nunique()) print(" - Missing values: ", data["week"].isnull().sum()) print("Sales Method") print(" - Unique values: ", data["sales_method"].unique()) print(" - Missing values: ", data["sales_method"].isnull().sum()) print("Customer ID") print(" - Unique values: ", data["customer_id"].nunique()) print(" - Missing values: ", data["customer_id"].isnull().sum()) print("Number of Products Sold") print(" - Unique values: ", data["nb_sold"].nunique()) print(" - Missing values: ", data["nb_sold"].isnull().sum()) print("Revenue") print(" - Unique values: ", data["revenue"].nunique()) print(" - Missing values: ", data["revenue"].isnull().sum()) print("Years as Customer") print(" - Unique values: ", data["years_as_customer"].nunique()) print(" - Missing values: ", data["years_as_customer"].isnull().sum()) print("Number of Site Visits") print(" - Unique values: ", data["nb_site_visits"].nunique()) print(" - Missing values: ", data["nb_site_visits"].isnull().sum()) print("State") print(" - Unique values: ", data["state"].nunique()) print(" - Missing values: ", data["state"].isnull().sum()) # Check data types of each column print(data.dtypes) # Calculate the median of the "revenue" column revenue_median = data["revenue"].median() # Replace the missing (NA) values in the "revenue" column with the calculated median data["revenue"].fillna(revenue_median, inplace=True) # Remove duplicate rows using the drop_duplicates() method data = data.drop_duplicates() # Transforming the data # Rename the data in 'sales_method' column data["sales_method"] = data["sales_method"].replace("em + call", "Email + Call") data["sales_method"] = data["sales_method"].replace("email", "Email") # Save the updated data to CSV file data.to_csv("sales_data.csv", index=False) # # Exploratory Analysis # Here we explore the dataset on hand using the cleaned and validated version of data # Descriptive statistics # Number of unique customers num_customers = data["customer_id"].nunique() # Number of sales made num_sales = len(data) # Median revenue per sale med_revenue = data["revenue"].median() # Total revenue total_revenue = data["revenue"].sum() # Range of revenue revenue_range = "${:.2f} to ${:.2f}".format( data["revenue"].min(), data["revenue"].max() ) # Median number of products sold per transaction med_products = data["nb_sold"].median() # Median years as a customer med_years = data["years_as_customer"].median() # Range of years as a customer years_range = "{} to {}".format( data["years_as_customer"].min(), data["years_as_customer"].max() ) # Median number of site visits in the last 6 months med_visits = data["nb_site_visits"].median() # Range of site visits in the last 6 months visits_range = "{} to {}".format( data["nb_site_visits"].min(), data["nb_site_visits"].max() ) # Print results print("Number of unique customers:", num_customers) print("Number of sales made:", num_sales) print("Median revenue 
per sale: ${:.2f}".format(med_revenue)) print("Total revenue: ${:.2f}".format(total_revenue)) print("Range of revenue:", revenue_range) print("Median number of products sold per transaction:", med_products) print("Median years as a customer:", med_years) print("Range of years as a customer:", years_range) print("Median number of site visits in the last 6 months:", med_visits) print("Range of site visits in the last 6 months:", visits_range) # # In depth dataset investigation # ## What is the distribution of revenue values? # **The revenue values have a skewed distribution to the right, with most values falling between 32.54 and 238.32. The median revenue value is 89.50, which is lower than the mean value of 93.62 due to the presence of high revenue outliers.** The histogram shows a decrease in bin counts towards the tails of the distribution, and the box plot confirms the presence of outliers. This suggests that a few high revenue values significantly differ from the rest. # Create a histogram for revenue values plt.figure(figsize=(10, 6)) sns.histplot(data=data, x="revenue", bins=30, kde=True) plt.title("Distribution of Revenue Values") plt.xlabel("Revenue") plt.ylabel("Frequency") plt.show() # Create a box plot for revenue values plt.figure(figsize=(10, 6)) sns.boxplot(x=data["revenue"]) plt.title("Box Plot of Revenue Values") plt.xlabel("Revenue") plt.show() # Calculate summary statistics min_revenue = data["revenue"].min() max_revenue = data["revenue"].max() mean_revenue = data["revenue"].mean() # ## How many customers were there for each sales method? # Based on the data from last 6 weeks sine product launch, 'email' sales method represent 50%, 'call' 33% and **'email + call' has the lowest share of total customer 17%**. # ![Percentage of customers per sales method.png](attachment:56bb347d-1038-4a85-9c3b-e8dda63275de.png) import pandas as pd import matplotlib.pyplot as plt ## More in depth data investigation ## How many customers were there for each sales method? customer_count = ( data.groupby("sales_method")["customer_id"].nunique().sort_values(ascending=False) ) # Define the colors for each sales method colors = ["#4472C4", "#ED7D31", "#A5A5A5"] # Create the bar plot with the customized colors plt.bar(customer_count.index, customer_count.values, color=colors) plt.xlabel("Sales Method") plt.ylabel("Number of Customers") plt.title("Number of Customers per Sales Method") plt.show() print(customer_count) # ## What does the spread of the revenue look like overall? And the median for each method? # The data shows that **the median revenue is highest for the "Email + Call" method with a median of 182.14** despite having lowest number of customers and aslo it has a standard deviation of 29.08. The "Email" method has a median revenue of 94.28, and the "Call" method has the lowest median revenue of 49.94. The 75th percentile revenue for each method ranges from 52.98 to 189.54, with the highest 75th percentile revenue for the "Email + Call" method. The maximum revenue for each method ranges from 89.50 to 238.32, with the highest maximum revenue for the "Email + Call" method. ## What does the spread of the revenue look like overall? And for each method? 
# Compute total revenue by sales method total_revenue_by_method = ( data.groupby("sales_method")["revenue"].sum().sort_values(ascending=False) ) # Define the colors for each sales method colors = ["#4472C4", "#A5A5A5", "#ED7D31"] # Create bar chart plt.bar(total_revenue_by_method.index, total_revenue_by_method.values, color=colors) plt.title("Total Revenue by Sales Method") plt.xlabel("Sales Method") plt.ylabel("Total Revenue") plt.show() overall_revenue = data["revenue"].describe() print("Overall Revenue:") print(overall_revenue) revenue_by_method = data.groupby("sales_method")["revenue"].describe() print("Revenue by Method:") print(revenue_by_method) # ![Median of revenue by sales method.png](attachment:221226a4-8352-442f-809a-fd972898d9aa.png) # Compute median revenue per customer for each sales method revenue_per_customer = data.groupby("sales_method")["revenue"].median() # Define the colors for each sales method colors = ["#ED7D31", "#4472C4", "#A5A5A5"] # Create scatter plot with colored markers plt.scatter(revenue_per_customer.index, revenue_per_customer.values, c=colors) plt.title("Median revenue by sales method") plt.xlabel("Median sales revenue") plt.ylabel("Sales method") plt.show() print(revenue_per_customer) # ## What is the trend of the number of products sold by sales method over time? # The trend of the number of products sold by sales method over time can be observed from the table as follows: # - For the Call sales method, the number of products sold increased from week 1 to week 6. # - For the Email sales method, the number of products sold increased from week 1 to week 6. # - For the Email + Call sales method, the number of products sold increased from week 1 to week 6. # Overall, all sales methods show an increasing trend in the number of products sold over time. ## What is the trend of the number of products sold by sales method over time? # Convert 'week' column to datetime object data["week"] = pd.to_datetime(data["week"]) # Group data by sales method and week, and sum the number of products sold sales_by_week = data.groupby(["sales_method", "week"])["nb_sold"].median().reset_index() # Define color palette colors = {"Email": "#4472C4", "Call": "#ED7D31", "Email + Call": "#A5A5A5"} # Pivot the data to create a table with sales method as columns and week as rows sales_pivot = sales_by_week.pivot( index="week", columns="sales_method", values="nb_sold" ) # Create an area plot sales_pivot.plot.area(color=colors) plt.title("Trend of Number of Products Sold by Sales Method over Time") plt.xlabel("Week") plt.ylabel("Number of Products Sold") plt.show() print(sales_by_week) # ## What median of sold units by sales method? # Below chart suggests that **median revenue of the "Email + Call" sales method is the highest number of units sold 12** followed by "Email" and "Call" # ![Median of sold units by sales method.png](attachment:f9dadcbb-47fa-4df7-b3e4-44c16ef07cc0.png) # ## Was there any difference in revenue over time for each of the methods? # Based on the updated data, we can see that **sales revenues by 'Email+Call' consistently increase over time.** For the 'Call' method, there is a general upward trend in revenue, while the 'Email' method shows a more fluctuating pattern, with some increases and decreases in revenue. In the last week, all three methods experienced an increase in sales revenues, with 'Email+Call' having the most significant growth. # ## Was there any difference in revenue over time for each of the methods? 
revenue_over_time = data.groupby(["week", "sales_method"])["revenue"].median().unstack() # Define color palette colors = {"Email": "#4472C4", "Call": "#ED7D31", "Email + Call": "#A5A5A5"} revenue_over_time.plot(kind="line", color=colors) plt.title("Revenue over time for each of the methods") plt.show() print(revenue_over_time) # ## What is the relationship between the number of site visits and the revenue achieved by each sales method over time? # Based on the updated data provided, it appears that **there is a positive relationship between the number of site visits and the revenue achieved by each sales method over time.** Analyzing the data, we can see that **the revenue achieved by the 'Email + Call' method is the highest among the three sales methods for each week.** Furthermore, we can observe that as the number of site visits increases, the revenue also increases for each week and sales method. # Group the data by year, sales method, and calculate the median revenue and number of site visits grouped_data = ( data.groupby(["week", "sales_method"]) .agg({"revenue": "median", "nb_site_visits": "median"}) .reset_index() ) # Define the colors for each sales method colors = ["#ED7D31", "#4472C4", "#A5A5A5"] # Create bubble plot for Call sales method plt.scatter( x=grouped_data.loc[grouped_data["sales_method"] == "Call", "week"], y=grouped_data.loc[grouped_data["sales_method"] == "Call", "revenue"], s=grouped_data.loc[grouped_data["sales_method"] == "Call", "nb_site_visits"] * 10, c=colors[0], alpha=0.5, label="Call", ) # Create bubble plot for Email sales method plt.scatter( x=grouped_data.loc[grouped_data["sales_method"] == "Email", "week"], y=grouped_data.loc[grouped_data["sales_method"] == "Email", "revenue"], s=grouped_data.loc[grouped_data["sales_method"] == "Email", "nb_site_visits"] * 10, c=colors[1], alpha=0.5, label="Email", ) # Create bubble plot for Email + Call sales method plt.scatter( x=grouped_data.loc[grouped_data["sales_method"] == "Email + Call", "week"], y=grouped_data.loc[grouped_data["sales_method"] == "Email + Call", "revenue"], s=grouped_data.loc[grouped_data["sales_method"] == "Email + Call", "nb_site_visits"] * 10, c=colors[2], alpha=0.5, label="Email + Call", ) # Set plot properties plt.title( "Relationship Between Number of Site Visits and Revenue by Sales Method Over Time" ) plt.xlabel("week") plt.ylabel("Revenue") plt.legend(title="Sales Method") # Display the plot plt.show() print(grouped_data) # ## Which states have higher revenues by each sales method? # Total revenue per state gives us an idea of the **overall revenue generated by a state**, regardless of the sales method used. It is useful for identifying which states contribute the most to the company's revenue, which are **California, Texas, New York and Florida**. Median revenue per state, on the other hand, provides an idea of the revenue generated by a state for a particular sales method. It is useful for identifying which sales methods are most effective in different states, as shown in the data below: # This data can be used to analyze the effectiveness of different sales methods across various states. ## Which states have higher revenues by each sales method? 
# Define the colors for each sales method colors = ["#ED7D31", "#4472C4", "#A5A5A5"] # Subset the data to only include sales by state and sales method state_revenues = ( data.groupby(["state", "sales_method"]).agg({"revenue": "median"}).reset_index() ) # Pivot the data to wide format for plotting state_revenues_wide = state_revenues.pivot( index="state", columns="sales_method", values="revenue" ) # Plot the data as a grouped bar chart state_revenues_wide.plot(kind="bar", figsize=(10, 6), color=colors) plt.title("Revenue by State and Sales Method") plt.xlabel("state") plt.ylabel("Revenue") plt.legend(title="Sales Method") plt.show() pd.set_option("display.max_rows", None) print(state_revenues) # ![Total revnue per state.png](attachment:94f29468-5494-4ab6-b831-69aa3482bce4.png) # ## What is the relationship between customer years with the company and median revenues achieved by each sales method? # The given data shows the relationship between the customer years with the company and the median revenues achieved by each sales method. The data is grouped into three categories based on the years a customer has been with the company, i.e., 0-5 years, 5-10 years, and more than 10 years. The median revenues achieved by each sales method are calculated for each of these categories. # From the data, we can see that there is not a significant difference in the median revenues achieved by each sales method across the three categories. The revenues achieved by each sales method are relatively consistent across the three categories, with only slight variations. # Therefore, we can conclude that **there is not a strong relationship between customer years with the company and the median revenues achieved by each sales method.** # ## What is the relationship between customer years with the company and median revenues achieved by each sales method? # Divide years as customer to 5 ranges data["years_range"] = pd.cut( data["years_as_customer"], bins=[0, 5, 10, 100], labels=["0-5", "5-10", "10+"] ) # Define the colors for each sales method colors = ["#ED7D31", "#4472C4", "#A5A5A5"] # Group the data by years range, sales method, and calculate the median revenue med_revenue_by_years_sales = ( data.groupby(["years_range", "sales_method"])["revenue"].median().reset_index() ) # Pivot the table to make it suitable for a line chart line_chart_data = med_revenue_by_years_sales.pivot( index="years_range", columns="sales_method", values="revenue" ) # Create a line chart line_chart_data.plot(kind="line", marker="o", color=colors) plt.title("Median Revenue by Years Range and Sales Method") plt.xlabel("Years Range") plt.ylabel("Median Revenue") plt.show() print(med_revenue_by_years_sales) # ## What is the relationship between customer years with company and median units sold by each sales method? # Based on the provided data, we can see that **the median units sold by each sales method remain relatively consistent across the three customer year ranges.** For each sales method (Call, Email, and Email + Call), the median units sold is 9.0 for Call and Email, and 12.0 for Email + Call, in all three customer year ranges (0-5 years, 5-10 years, and 10+ years). This suggests that the sales method's effectiveness remains constant regardless of the years a customer has been with the company. Additionally, **the median units sold by the Email + Call sales method is higher than the other two sales methods in all three customer year ranges**, indicating that this sales method may be more effective at generating sales. 
# Divide years as customer to 5 ranges data["years_range"] = pd.cut( data["years_as_customer"], bins=[0, 5, 10, 100], labels=["0-5", "5-10", "10+"] ) # Define the colors for each sales method colors = ["#ED7D31", "#4472C4", "#A5A5A5"] # Group the data by years range, sales method, and calculate the median revenue avg_products_by_years_sales = ( data.groupby(["years_range", "sales_method"])["nb_sold"].median().reset_index() ) # Create scatter plot sns.scatterplot( data=avg_products_by_years_sales, x="years_range", y="nb_sold", hue="sales_method", palette=colors, ) # Set plot properties plt.title( "Relationship Between Customer Years with Company and Median Units Sold by Sales Method" ) plt.xlabel("Years as Customer") plt.ylabel("Median Units Sold") plt.legend(title="Sales Method") # Display the plot plt.show() # Display the grouped data pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) print(avg_products_by_years_sales)
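# Editor's illustrative sketch (not part of the original notebook): the conclusion above — that median revenue and median units sold stay roughly constant across customer-tenure ranges — can be checked at a glance with one pivot table. It reuses only columns already created above (years_range, sales_method, revenue, nb_sold).
tenure_summary = data.pivot_table(
    index="years_range",
    columns="sales_method",
    values=["revenue", "nb_sold"],
    aggfunc="median",
)
print(tenure_summary)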
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/088/129088507.ipynb
pens-and-printers-sales-data
amrmelharony
[{"Id": 129088507, "ScriptId": 38374963, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13105506, "CreationDate": "05/10/2023 23:19:09", "VersionNumber": 2.0, "Title": "Pens and Printers New Line o f Business - DataCamp", "EvaluationDate": "05/10/2023", "IsChange": false, "TotalLines": 447.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 447.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184832185, "KernelVersionId": 129088507, "SourceDatasetVersionId": 5659121}]
[{"Id": 5659121, "DatasetId": 3252454, "DatasourceVersionId": 5734543, "CreatorUserId": 13105506, "LicenseName": "Unknown", "CreationDate": "05/10/2023 23:17:48", "VersionNumber": 1.0, "Title": "pens and printers sales data", "Slug": "pens-and-printers-sales-data", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3252454, "CreatorUserId": 13105506, "OwnerUserId": 13105506.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5659121.0, "CurrentDatasourceVersionId": 5734543.0, "ForumId": 3317891, "Type": 2, "CreationDate": "05/10/2023 23:17:48", "LastActivityDate": "05/10/2023", "TotalViews": 86, "TotalDownloads": 4, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 13105506, "UserName": "amrmelharony", "DisplayName": "Amr Elharony", "RegisterDate": "12/31/2022", "PerformanceTier": 0}]
# # Business Problem Definition
# The business problem at hand is to **determine the most effective sales strategy for Pens and Printers' new line of office stationery**. The company has tested three different sales strategies, namely email, phone calls, and a combination of email and phone calls, to sell the new product line. The sales team needs to understand which approach worked the best and if there were any differences in revenue over time for each of the methods. Additionally, the team wants to identify if there are other differences between the customers in each group that could provide context for what went well. The goal is to use the findings to inform the decision of which sales strategy to continue using to sell the new product line.
# # Data validation
# This step starts with performing data preparation tasks, including removing duplicates, dealing with missing values, and transforming variables. The dataset contains **15,000 rows and 8 columns** before cleaning and validation. I have validated all the columns against the criteria in the dataset table:
# **Here's a breakdown of each column's validation and cleaning steps:**
# - "Week": The "Week" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 6
# - "Sales Method": The "Sales Method" column is validated by checking the unique values and the number of missing values. No cleaning is performed on this column. Unique values: ['Email + Call' 'Call' 'Email' 'em + call' 'email']
# - "Customer ID": The "Customer ID" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 15000
# - "Number of Products Sold": The "Number of Products Sold" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column.
# - "Revenue": The "Revenue" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 6743
# - "Years as Customer": The "Years as Customer" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 42
# - "Number of Site Visits": The "Number of Site Visits" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 26
# - "State": The "State" column is validated by checking the number of unique values and the number of missing values. No cleaning is performed on this column. Unique values: 50
# - Data types of all columns are validated.
# **After validating each column, two cleaning steps are performed on the entire dataset:**
# - Replacing missing values with the median of the revenue column: rows with missing values are filled with the median of the revenue column using the fillna() method. We replaced 1074 missing values in the revenue column.
# - Remove duplicates: duplicate rows are removed from the dataset using the drop_duplicates() method. We didn't find duplicates in the dataset.
# **After the two cleaning steps, we transform the sales_method column:**
# We use the replace function to replace all occurrences of 'em + call' with 'Email + Call' and 'email' with 'Email' in the sales_method column.
So, we have only three columns representing the sales method 'Email + Call', 'Call', 'Email' # After the data validation, the dataset contains **15000 rows and 8 columns**. # import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # Load the data data = pd.read_csv("sales_data.csv") # Data validation and cleaning # Validate each column print("Week") print(" - Unique values: ", data["week"].nunique()) print(" - Missing values: ", data["week"].isnull().sum()) print("Sales Method") print(" - Unique values: ", data["sales_method"].unique()) print(" - Missing values: ", data["sales_method"].isnull().sum()) print("Customer ID") print(" - Unique values: ", data["customer_id"].nunique()) print(" - Missing values: ", data["customer_id"].isnull().sum()) print("Number of Products Sold") print(" - Unique values: ", data["nb_sold"].nunique()) print(" - Missing values: ", data["nb_sold"].isnull().sum()) print("Revenue") print(" - Unique values: ", data["revenue"].nunique()) print(" - Missing values: ", data["revenue"].isnull().sum()) print("Years as Customer") print(" - Unique values: ", data["years_as_customer"].nunique()) print(" - Missing values: ", data["years_as_customer"].isnull().sum()) print("Number of Site Visits") print(" - Unique values: ", data["nb_site_visits"].nunique()) print(" - Missing values: ", data["nb_site_visits"].isnull().sum()) print("State") print(" - Unique values: ", data["state"].nunique()) print(" - Missing values: ", data["state"].isnull().sum()) # Check data types of each column print(data.dtypes) # Calculate the median of the "revenue" column revenue_median = data["revenue"].median() # Replace the missing (NA) values in the "revenue" column with the calculated median data["revenue"].fillna(revenue_median, inplace=True) # Remove duplicate rows using the drop_duplicates() method data = data.drop_duplicates() # Transforming the data # Rename the data in 'sales_method' column data["sales_method"] = data["sales_method"].replace("em + call", "Email + Call") data["sales_method"] = data["sales_method"].replace("email", "Email") # Save the updated data to CSV file data.to_csv("sales_data.csv", index=False) # # Exploratory Analysis # Here we explore the dataset on hand using the cleaned and validated version of data # Descriptive statistics # Number of unique customers num_customers = data["customer_id"].nunique() # Number of sales made num_sales = len(data) # Median revenue per sale med_revenue = data["revenue"].median() # Total revenue total_revenue = data["revenue"].sum() # Range of revenue revenue_range = "${:.2f} to ${:.2f}".format( data["revenue"].min(), data["revenue"].max() ) # Median number of products sold per transaction med_products = data["nb_sold"].median() # Median years as a customer med_years = data["years_as_customer"].median() # Range of years as a customer years_range = "{} to {}".format( data["years_as_customer"].min(), data["years_as_customer"].max() ) # Median number of site visits in the last 6 months med_visits = data["nb_site_visits"].median() # Range of site visits in the last 6 months visits_range = "{} to {}".format( data["nb_site_visits"].min(), data["nb_site_visits"].max() ) # Print results print("Number of unique customers:", num_customers) print("Number of sales made:", num_sales) print("Median revenue per sale: ${:.2f}".format(med_revenue)) print("Total revenue: ${:.2f}".format(total_revenue)) print("Range of revenue:", revenue_range) print("Median number of products sold per transaction:", med_products) 
print("Median years as a customer:", med_years) print("Range of years as a customer:", years_range) print("Median number of site visits in the last 6 months:", med_visits) print("Range of site visits in the last 6 months:", visits_range) # # In depth dataset investigation # ## What is the distribution of revenue values? # **The revenue values have a skewed distribution to the right, with most values falling between 32.54 and 238.32. The median revenue value is 89.50, which is lower than the mean value of 93.62 due to the presence of high revenue outliers.** The histogram shows a decrease in bin counts towards the tails of the distribution, and the box plot confirms the presence of outliers. This suggests that a few high revenue values significantly differ from the rest. # Create a histogram for revenue values plt.figure(figsize=(10, 6)) sns.histplot(data=data, x="revenue", bins=30, kde=True) plt.title("Distribution of Revenue Values") plt.xlabel("Revenue") plt.ylabel("Frequency") plt.show() # Create a box plot for revenue values plt.figure(figsize=(10, 6)) sns.boxplot(x=data["revenue"]) plt.title("Box Plot of Revenue Values") plt.xlabel("Revenue") plt.show() # Calculate summary statistics min_revenue = data["revenue"].min() max_revenue = data["revenue"].max() mean_revenue = data["revenue"].mean() # ## How many customers were there for each sales method? # Based on the data from last 6 weeks sine product launch, 'email' sales method represent 50%, 'call' 33% and **'email + call' has the lowest share of total customer 17%**. # ![Percentage of customers per sales method.png](attachment:56bb347d-1038-4a85-9c3b-e8dda63275de.png) import pandas as pd import matplotlib.pyplot as plt ## More in depth data investigation ## How many customers were there for each sales method? customer_count = ( data.groupby("sales_method")["customer_id"].nunique().sort_values(ascending=False) ) # Define the colors for each sales method colors = ["#4472C4", "#ED7D31", "#A5A5A5"] # Create the bar plot with the customized colors plt.bar(customer_count.index, customer_count.values, color=colors) plt.xlabel("Sales Method") plt.ylabel("Number of Customers") plt.title("Number of Customers per Sales Method") plt.show() print(customer_count) # ## What does the spread of the revenue look like overall? And the median for each method? # The data shows that **the median revenue is highest for the "Email + Call" method with a median of 182.14** despite having lowest number of customers and aslo it has a standard deviation of 29.08. The "Email" method has a median revenue of 94.28, and the "Call" method has the lowest median revenue of 49.94. The 75th percentile revenue for each method ranges from 52.98 to 189.54, with the highest 75th percentile revenue for the "Email + Call" method. The maximum revenue for each method ranges from 89.50 to 238.32, with the highest maximum revenue for the "Email + Call" method. ## What does the spread of the revenue look like overall? And for each method? 
# Compute total revenue by sales method total_revenue_by_method = ( data.groupby("sales_method")["revenue"].sum().sort_values(ascending=False) ) # Define the colors for each sales method colors = ["#4472C4", "#A5A5A5", "#ED7D31"] # Create bar chart plt.bar(total_revenue_by_method.index, total_revenue_by_method.values, color=colors) plt.title("Total Revenue by Sales Method") plt.xlabel("Sales Method") plt.ylabel("Total Revenue") plt.show() overall_revenue = data["revenue"].describe() print("Overall Revenue:") print(overall_revenue) revenue_by_method = data.groupby("sales_method")["revenue"].describe() print("Revenue by Method:") print(revenue_by_method) # ![Median of revenue by sales method.png](attachment:221226a4-8352-442f-809a-fd972898d9aa.png) # Compute median revenue per customer for each sales method revenue_per_customer = data.groupby("sales_method")["revenue"].median() # Define the colors for each sales method colors = ["#ED7D31", "#4472C4", "#A5A5A5"] # Create scatter plot with colored markers plt.scatter(revenue_per_customer.index, revenue_per_customer.values, c=colors) plt.title("Median revenue by sales method") plt.xlabel("Median sales revenue") plt.ylabel("Sales method") plt.show() print(revenue_per_customer) # ## What is the trend of the number of products sold by sales method over time? # The trend of the number of products sold by sales method over time can be observed from the table as follows: # - For the Call sales method, the number of products sold increased from week 1 to week 6. # - For the Email sales method, the number of products sold increased from week 1 to week 6. # - For the Email + Call sales method, the number of products sold increased from week 1 to week 6. # Overall, all sales methods show an increasing trend in the number of products sold over time. ## What is the trend of the number of products sold by sales method over time? # Convert 'week' column to datetime object data["week"] = pd.to_datetime(data["week"]) # Group data by sales method and week, and sum the number of products sold sales_by_week = data.groupby(["sales_method", "week"])["nb_sold"].median().reset_index() # Define color palette colors = {"Email": "#4472C4", "Call": "#ED7D31", "Email + Call": "#A5A5A5"} # Pivot the data to create a table with sales method as columns and week as rows sales_pivot = sales_by_week.pivot( index="week", columns="sales_method", values="nb_sold" ) # Create an area plot sales_pivot.plot.area(color=colors) plt.title("Trend of Number of Products Sold by Sales Method over Time") plt.xlabel("Week") plt.ylabel("Number of Products Sold") plt.show() print(sales_by_week) # ## What median of sold units by sales method? # Below chart suggests that **median revenue of the "Email + Call" sales method is the highest number of units sold 12** followed by "Email" and "Call" # ![Median of sold units by sales method.png](attachment:f9dadcbb-47fa-4df7-b3e4-44c16ef07cc0.png) # ## Was there any difference in revenue over time for each of the methods? # Based on the updated data, we can see that **sales revenues by 'Email+Call' consistently increase over time.** For the 'Call' method, there is a general upward trend in revenue, while the 'Email' method shows a more fluctuating pattern, with some increases and decreases in revenue. In the last week, all three methods experienced an increase in sales revenues, with 'Email+Call' having the most significant growth. # ## Was there any difference in revenue over time for each of the methods? 
revenue_over_time = data.groupby(["week", "sales_method"])["revenue"].median().unstack() # Define color palette colors = {"Email": "#4472C4", "Call": "#ED7D31", "Email + Call": "#A5A5A5"} revenue_over_time.plot(kind="line", color=colors) plt.title("Revenue over time for each of the methods") plt.show() print(revenue_over_time) # ## What is the relationship between the number of site visits and the revenue achieved by each sales method over time? # Based on the updated data provided, it appears that **there is a positive relationship between the number of site visits and the revenue achieved by each sales method over time.** Analyzing the data, we can see that **the revenue achieved by the 'Email + Call' method is the highest among the three sales methods for each week.** Furthermore, we can observe that as the number of site visits increases, the revenue also increases for each week and sales method. # Group the data by year, sales method, and calculate the median revenue and number of site visits grouped_data = ( data.groupby(["week", "sales_method"]) .agg({"revenue": "median", "nb_site_visits": "median"}) .reset_index() ) # Define the colors for each sales method colors = ["#ED7D31", "#4472C4", "#A5A5A5"] # Create bubble plot for Call sales method plt.scatter( x=grouped_data.loc[grouped_data["sales_method"] == "Call", "week"], y=grouped_data.loc[grouped_data["sales_method"] == "Call", "revenue"], s=grouped_data.loc[grouped_data["sales_method"] == "Call", "nb_site_visits"] * 10, c=colors[0], alpha=0.5, label="Call", ) # Create bubble plot for Email sales method plt.scatter( x=grouped_data.loc[grouped_data["sales_method"] == "Email", "week"], y=grouped_data.loc[grouped_data["sales_method"] == "Email", "revenue"], s=grouped_data.loc[grouped_data["sales_method"] == "Email", "nb_site_visits"] * 10, c=colors[1], alpha=0.5, label="Email", ) # Create bubble plot for Email + Call sales method plt.scatter( x=grouped_data.loc[grouped_data["sales_method"] == "Email + Call", "week"], y=grouped_data.loc[grouped_data["sales_method"] == "Email + Call", "revenue"], s=grouped_data.loc[grouped_data["sales_method"] == "Email + Call", "nb_site_visits"] * 10, c=colors[2], alpha=0.5, label="Email + Call", ) # Set plot properties plt.title( "Relationship Between Number of Site Visits and Revenue by Sales Method Over Time" ) plt.xlabel("week") plt.ylabel("Revenue") plt.legend(title="Sales Method") # Display the plot plt.show() print(grouped_data) # ## Which states have higher revenues by each sales method? # Total revenue per state gives us an idea of the **overall revenue generated by a state**, regardless of the sales method used. It is useful for identifying which states contribute the most to the company's revenue, which are **California, Texas, New York and Florida**. Median revenue per state, on the other hand, provides an idea of the revenue generated by a state for a particular sales method. It is useful for identifying which sales methods are most effective in different states, as shown in the data below: # This data can be used to analyze the effectiveness of different sales methods across various states. ## Which states have higher revenues by each sales method? 
# Define the colors for each sales method colors = ["#ED7D31", "#4472C4", "#A5A5A5"] # Subset the data to only include sales by state and sales method state_revenues = ( data.groupby(["state", "sales_method"]).agg({"revenue": "median"}).reset_index() ) # Pivot the data to wide format for plotting state_revenues_wide = state_revenues.pivot( index="state", columns="sales_method", values="revenue" ) # Plot the data as a grouped bar chart state_revenues_wide.plot(kind="bar", figsize=(10, 6), color=colors) plt.title("Revenue by State and Sales Method") plt.xlabel("state") plt.ylabel("Revenue") plt.legend(title="Sales Method") plt.show() pd.set_option("display.max_rows", None) print(state_revenues) # ![Total revnue per state.png](attachment:94f29468-5494-4ab6-b831-69aa3482bce4.png) # ## What is the relationship between customer years with the company and median revenues achieved by each sales method? # The given data shows the relationship between the customer years with the company and the median revenues achieved by each sales method. The data is grouped into three categories based on the years a customer has been with the company, i.e., 0-5 years, 5-10 years, and more than 10 years. The median revenues achieved by each sales method are calculated for each of these categories. # From the data, we can see that there is not a significant difference in the median revenues achieved by each sales method across the three categories. The revenues achieved by each sales method are relatively consistent across the three categories, with only slight variations. # Therefore, we can conclude that **there is not a strong relationship between customer years with the company and the median revenues achieved by each sales method.** # ## What is the relationship between customer years with the company and median revenues achieved by each sales method? # Divide years as customer to 5 ranges data["years_range"] = pd.cut( data["years_as_customer"], bins=[0, 5, 10, 100], labels=["0-5", "5-10", "10+"] ) # Define the colors for each sales method colors = ["#ED7D31", "#4472C4", "#A5A5A5"] # Group the data by years range, sales method, and calculate the median revenue med_revenue_by_years_sales = ( data.groupby(["years_range", "sales_method"])["revenue"].median().reset_index() ) # Pivot the table to make it suitable for a line chart line_chart_data = med_revenue_by_years_sales.pivot( index="years_range", columns="sales_method", values="revenue" ) # Create a line chart line_chart_data.plot(kind="line", marker="o", color=colors) plt.title("Median Revenue by Years Range and Sales Method") plt.xlabel("Years Range") plt.ylabel("Median Revenue") plt.show() print(med_revenue_by_years_sales) # ## What is the relationship between customer years with company and median units sold by each sales method? # Based on the provided data, we can see that **the median units sold by each sales method remain relatively consistent across the three customer year ranges.** For each sales method (Call, Email, and Email + Call), the median units sold is 9.0 for Call and Email, and 12.0 for Email + Call, in all three customer year ranges (0-5 years, 5-10 years, and 10+ years). This suggests that the sales method's effectiveness remains constant regardless of the years a customer has been with the company. Additionally, **the median units sold by the Email + Call sales method is higher than the other two sales methods in all three customer year ranges**, indicating that this sales method may be more effective at generating sales. 
# Divide years as customer to 5 ranges data["years_range"] = pd.cut( data["years_as_customer"], bins=[0, 5, 10, 100], labels=["0-5", "5-10", "10+"] ) # Define the colors for each sales method colors = ["#ED7D31", "#4472C4", "#A5A5A5"] # Group the data by years range, sales method, and calculate the median revenue avg_products_by_years_sales = ( data.groupby(["years_range", "sales_method"])["nb_sold"].median().reset_index() ) # Create scatter plot sns.scatterplot( data=avg_products_by_years_sales, x="years_range", y="nb_sold", hue="sales_method", palette=colors, ) # Set plot properties plt.title( "Relationship Between Customer Years with Company and Median Units Sold by Sales Method" ) plt.xlabel("Years as Customer") plt.ylabel("Median Units Sold") plt.legend(title="Sales Method") # Display the plot plt.show() # Display the grouped data pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) print(avg_products_by_years_sales)
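# Editor's consolidation sketch (not part of the original notebook): the per-method figures discussed throughout the analysis, gathered into a single table for the final comparison of sales methods. Only columns already used above are involved.
method_summary = data.groupby("sales_method").agg(
    customers=("customer_id", "nunique"),
    median_revenue=("revenue", "median"),
    total_revenue=("revenue", "sum"),
    median_units=("nb_sold", "median"),
)
print(method_summary.sort_values("median_revenue", ascending=False))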
false
0
6,010
0
6,040
6,010
129088906
# ### Read Excel and plot daily/monthly/yearly patterns
import pandas as pd
import matplotlib.pyplot as plt

# ## Reading sheet names
xl = pd.ExcelFile(
    "/kaggle/input/solarunsw/Re_ UNSW NEM Python code/Line 4 historical load trace.xlsx"
)
print(xl.sheet_names)
# ## Reading a sheet
dict_df = pd.read_excel(
    "/kaggle/input/solarunsw/Re_ UNSW NEM Python code/Line 4 historical load trace.xlsx",
    sheet_name=["MRN330__4_MW", "YSN330__4_MW", "YSN330__4_MVA"],
)
workbook = dict_df.get("YSN330__4_MVA")
workbook.head()
workbook["MVA max"].plot()
# Collect the values from the third column into a plain list
input_ = []
for row in workbook.iloc:
    input_.append(row[2])
print(len(input_))
# ## Daily Pattern
# One day, assuming 48 half-hourly samples per day
plt.plot(input_[0:48])
# ## Weekly Pattern
plt.plot(input_[0 : 48 * 7])
# ## Yearly Pattern
plt.plot(input_[0 : 48 * 365])
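# Editor's sketch (not part of the original notebook): an average daily profile of the trace, assuming 48 half-hourly samples per day (the sampling rate implied by the Daily Pattern slice above) and numeric values in input_.
import numpy as np

n_days = len(input_) // 48
daily = np.array(input_[: n_days * 48]).reshape(n_days, 48)
plt.plot(daily.mean(axis=0))
plt.xlabel("Half-hour of day")
plt.ylabel("Mean value")
plt.title("Average Daily Profile")
plt.show()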
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/088/129088906.ipynb
null
null
[{"Id": 129088906, "ScriptId": 38352438, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1751233, "CreationDate": "05/10/2023 23:27:51", "VersionNumber": 2.0, "Title": "Read Excel and Plot", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 37.0, "LinesInsertedFromPrevious": 5.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 32.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# ### Read Excel and plot daily/monthly/yearly patterns
import pandas as pd
import matplotlib.pyplot as plt

# ## Reading sheet names
xl = pd.ExcelFile(
    "/kaggle/input/solarunsw/Re_ UNSW NEM Python code/Line 4 historical load trace.xlsx"
)
print(xl.sheet_names)
# ## Reading a sheet
dict_df = pd.read_excel(
    "/kaggle/input/solarunsw/Re_ UNSW NEM Python code/Line 4 historical load trace.xlsx",
    sheet_name=["MRN330__4_MW", "YSN330__4_MW", "YSN330__4_MVA"],
)
workbook = dict_df.get("YSN330__4_MVA")
workbook.head()
workbook["MVA max"].plot()
# Collect the values from the third column into a plain list
input_ = []
for row in workbook.iloc:
    input_.append(row[2])
print(len(input_))
# ## Daily Pattern
# One day, assuming 48 half-hourly samples per day
plt.plot(input_[0:48])
# ## Weekly Pattern
plt.plot(input_[0 : 48 * 7])
# ## Yearly Pattern
plt.plot(input_[0 : 48 * 365])
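# Editor's sketch (not part of the original notebook): monthly means via a proper time index. The start date below is purely an illustrative assumption; replace it with the real start of the trace if known, and half-hourly sampling is assumed as above.
idx = pd.date_range("2020-01-01", periods=len(input_), freq="30min")
series = pd.Series(input_, index=idx)
series.resample("M").mean().plot()
plt.title("Monthly Mean (assumed half-hourly sampling)")
plt.show()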
false
0
293
0
293
293
129088788
# importing dataset from sklearn import datasets digit = datasets.load_digits() features = digit.data lables = digit.target import pandas as pa print(pa.DataFrame(features)) print(pa.DataFrame(lables)) print(digit.DESCR) imd = digit.images print(imd) X = pa.DataFrame(features) y = pa.DataFrame(lables) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=0.7, random_state=2529 ) X_train.shape, X_test.shape, y_train.shape, y_test.shape # from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier # clf = KNeighborsClassifier() clf = RandomForestClassifier() clf.fit(features, lables) y_pred = clf.predict(X_test) y_pred # Step 8 : model accuracy from sklearn.metrics import confusion_matrix, accuracy_score, classification_report confusion_matrix(y_test, y_pred) accuracy_score(y_test, y_pred) print(classification_report(y_test, y_pred)) print(features[32]) print(lables[32]) preds = clf.predict( [ [ 0.0, 2.0, 13.0, 16.0, 16.0, 16.0, 11.0, 0.0, 0.0, 5.0, 16.0, 10.0, 5.0, 4.0, 1.0, 0.0, 0.0, 6.0, 16.0, 7.0, 3.0, 0.0, 0.0, 0.0, 0.0, 9.0, 16.0, 16.0, 16.0, 6.0, 0.0, 0.0, 0.0, 3.0, 8.0, 4.0, 11.0, 15.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 12.0, 15.0, 0.0, 0.0, 0.0, 0.0, 4.0, 13.0, 16.0, 6.0, 0.0, 0.0, 0.0, 2.0, 16.0, 15.0, 8.0, 0.0, 0.0, 0.0, ] ] ) print(preds)
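# Editor's sketch (not part of the original notebook): the sklearn digits are 8x8 images, so the sample whose features and label are printed above can be displayed to sanity-check the features/labels pairing.
import matplotlib.pyplot as plt

plt.imshow(digit.images[32], cmap="gray_r")
plt.title(f"True label: {lables[32]}")
plt.show()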
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/088/129088788.ipynb
null
null
[{"Id": 129088788, "ScriptId": 38159410, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13473800, "CreationDate": "05/10/2023 23:24:52", "VersionNumber": 1.0, "Title": "digit_ML", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 58.0, "LinesInsertedFromPrevious": 58.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# importing dataset from sklearn import datasets digit = datasets.load_digits() features = digit.data lables = digit.target import pandas as pa print(pa.DataFrame(features)) print(pa.DataFrame(lables)) print(digit.DESCR) imd = digit.images print(imd) X = pa.DataFrame(features) y = pa.DataFrame(lables) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=0.7, random_state=2529 ) X_train.shape, X_test.shape, y_train.shape, y_test.shape # from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier # clf = KNeighborsClassifier() clf = RandomForestClassifier() clf.fit(features, lables) y_pred = clf.predict(X_test) y_pred # Step 8 : model accuracy from sklearn.metrics import confusion_matrix, accuracy_score, classification_report confusion_matrix(y_test, y_pred) accuracy_score(y_test, y_pred) print(classification_report(y_test, y_pred)) print(features[32]) print(lables[32]) preds = clf.predict( [ [ 0.0, 2.0, 13.0, 16.0, 16.0, 16.0, 11.0, 0.0, 0.0, 5.0, 16.0, 10.0, 5.0, 4.0, 1.0, 0.0, 0.0, 6.0, 16.0, 7.0, 3.0, 0.0, 0.0, 0.0, 0.0, 9.0, 16.0, 16.0, 16.0, 6.0, 0.0, 0.0, 0.0, 3.0, 8.0, 4.0, 11.0, 15.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 12.0, 15.0, 0.0, 0.0, 0.0, 0.0, 4.0, 13.0, 16.0, 6.0, 0.0, 0.0, 0.0, 2.0, 16.0, 15.0, 8.0, 0.0, 0.0, 0.0, ] ] ) print(preds)
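# Editor's note and sketch (not part of the original notebook): the classifier above is fit on the full dataset and then scored on X_test, which comes from that same data, so the reported accuracy is optimistic. Cross-validation gives a leakage-free estimate.
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(
    RandomForestClassifier(random_state=0), features, lables, cv=5
)
print("5-fold CV accuracy: %.3f +/- %.3f" % (cv_scores.mean(), cv_scores.std()))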
false
0
687
0
687
687
129088986
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# ## Loading the test and train data sets
model_test = pd.read_csv(
    "/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
model_train = pd.read_csv(
    "/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
model_test
model_train
print(model_train.columns)
print(len(model_train.columns), "features present in training dataset")
print(model_test.columns)
print(len(model_test.columns), "features present in testing dataset")
print(model_test.shape, "shape of testing dataset")
print(model_train.shape, "shape of training dataset")
model_train.info()
model_test.info()
model_train.describe()
model_train.duplicated().sum()
model_test.duplicated().sum()
print(model_train.isnull().sum())
print("-----------")
print("-----------")
print("-----------")
print(model_test.isnull().sum())
dataframe = pd.DataFrame(model_train.isnull().sum().sort_values(ascending=False))
print(dataframe.to_markdown())
dataframe_test = pd.DataFrame(model_test.isnull().sum().sort_values(ascending=False))
print(dataframe_test.to_markdown())
# Percentage of missing values per column in the training set
null = model_train.isnull().sum() / model_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
print(dataframe_null.to_markdown())
# ## Removing Unnecessary Values
# Drop columns that are more than 50% missing
col_to_drop = null[null > 50].keys()
train_df = model_train.drop(col_to_drop, axis=1)
null_test = model_test.isnull().sum() / model_test.shape[0] * 100
null_test
dataframe_null_test = pd.DataFrame(null_test.sort_values(ascending=False))
print(dataframe_null_test.to_markdown())
col_to_drop_test = null_test[null_test > 50].keys()
# Drop the same high-missing columns as the training set so the feature sets stay aligned
test_df = model_test.drop(col_to_drop, axis=1)
len(train_df.columns)
len(test_df.columns)
train_df.columns[train_df.isnull().any()]
len(train_df.columns[train_df.isnull().any()])
train_df.skew()
# Fill remaining numeric gaps with the mean/median and categorical gaps with the mode
train_df["LotFrontage"] = train_df["LotFrontage"].fillna(train_df["LotFrontage"].mean())
train_df["MasVnrArea"] = train_df["MasVnrArea"].fillna(train_df["MasVnrArea"].mean())
train_df["GarageYrBlt"] = train_df["GarageYrBlt"].fillna(
    train_df["GarageYrBlt"].median()
)
train_df["MasVnrType"] = train_df["MasVnrType"].fillna(train_df["MasVnrType"].mode()[0])
train_df["BsmtQual"] = train_df["BsmtQual"].fillna(train_df["BsmtQual"].mode()[0])
train_df["BsmtCond"] = train_df["BsmtCond"].fillna(train_df["BsmtCond"].mode()[0])
train_df["BsmtExposure"] = train_df["BsmtExposure"].fillna(
    train_df["BsmtExposure"].mode()[0]
)
train_df["BsmtFinType1"] = train_df["BsmtFinType1"].fillna(
    train_df["BsmtFinType1"].mode()[0]
)
train_df["BsmtFinType2"] = train_df["BsmtFinType2"].fillna(
    train_df["BsmtFinType2"].mode()[0]
)
train_df["Electrical"] = train_df["Electrical"].fillna(train_df["Electrical"].mode()[0])
train_df["FireplaceQu"] = train_df["FireplaceQu"].fillna(
    train_df["FireplaceQu"].mode()[0]
)
train_df["GarageType"] = train_df["GarageType"].fillna(train_df["GarageType"].mode()[0]) train_df["GarageFinish"] = train_df["GarageFinish"].fillna( train_df["GarageFinish"].mode()[0] ) train_df["GarageQual"] = train_df["GarageQual"].fillna(train_df["GarageQual"].mode()[0]) train_df["GarageCond"] = train_df["GarageCond"].fillna(train_df["GarageCond"].mode()[0]) train_df.isnull().values.sum() test_df.fillna(test_df.mode().iloc[0], inplace=True) test_df.isnull().values.sum() # ## Scalar Conversion from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split corr = train_df.corr() high_corr_features = corr.index[abs(corr["SalePrice"]) > 0.50] print(f"highly correlated feature:\n", high_corr_features) print(f"No. of highly correlated features:", len(high_corr_features)) X = train_df[high_corr_features.drop("SalePrice")] y = train_df[["SalePrice"]] test_df = test_df[high_corr_features.drop("SalePrice")] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=10 ) print(X_train.shape, X_test.shape) scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.fit_transform(X_test) test_df = scaler.fit_transform(test_df) # ## Linear Regression Model from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(X_train, y_train) print("Coefficients: \n", lr.coef_) pred = lr.predict(X_test) plt.scatter(y_test, pred) plt.xlabel("Y Test") plt.ylabel("Predicted Y") from sklearn import metrics print("MAE:", metrics.mean_absolute_error(y_test, pred)) print("MSE:", metrics.mean_squared_error(y_test, pred)) print("RMSE:", np.sqrt(metrics.mean_squared_error(y_test, pred))) # ## Overall Outcome prediction = lr.predict(test_df) prediction.shape print(prediction) ids = model_test["Id"] print(ids) Final = pd.DataFrame({"Id": ids}) Final = pd.DataFrame({"Sales Price": prediction.flatten()}) Final_sub = pd.DataFrame({"Id": ids, "SalePrice": prediction.flatten()}) Final_sub.head(10) Final_sub.to_csv("submission.csv", index="None")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/088/129088986.ipynb
null
null
[{"Id": 129088986, "ScriptId": 38374410, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11502637, "CreationDate": "05/10/2023 23:29:31", "VersionNumber": 6.0, "Title": "CS4650 pr 5", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 186.0, "LinesInsertedFromPrevious": 34.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 152.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# ## Loading the test and train data sets
model_test = pd.read_csv(
    "/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
model_train = pd.read_csv(
    "/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
model_test
model_train
print(model_train.columns)
print(len(model_train.columns), "features present in training dataset")
print(model_test.columns)
print(len(model_test.columns), "features present in testing dataset")
print(model_test.shape, "shape of testing dataset")
print(model_train.shape, "shape of training dataset")
model_train.info()
model_test.info()
model_train.describe()
model_train.duplicated().sum()
model_test.duplicated().sum()
print(model_train.isnull().sum())
print("-----------")
print("-----------")
print("-----------")
print(model_test.isnull().sum())
dataframe = pd.DataFrame(model_train.isnull().sum().sort_values(ascending=False))
print(dataframe.to_markdown())
dataframe_test = pd.DataFrame(model_test.isnull().sum().sort_values(ascending=False))
print(dataframe_test.to_markdown())
# Percentage of missing values per column in the training set
null = model_train.isnull().sum() / model_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
print(dataframe_null.to_markdown())
# ## Removing Unnecessary Values
# Drop columns that are more than 50% missing
col_to_drop = null[null > 50].keys()
train_df = model_train.drop(col_to_drop, axis=1)
null_test = model_test.isnull().sum() / model_test.shape[0] * 100
null_test
dataframe_null_test = pd.DataFrame(null_test.sort_values(ascending=False))
print(dataframe_null_test.to_markdown())
col_to_drop_test = null_test[null_test > 50].keys()
# Drop the same high-missing columns as the training set so the feature sets stay aligned
test_df = model_test.drop(col_to_drop, axis=1)
len(train_df.columns)
len(test_df.columns)
train_df.columns[train_df.isnull().any()]
len(train_df.columns[train_df.isnull().any()])
train_df.skew()
# Fill remaining numeric gaps with the mean/median and categorical gaps with the mode
train_df["LotFrontage"] = train_df["LotFrontage"].fillna(train_df["LotFrontage"].mean())
train_df["MasVnrArea"] = train_df["MasVnrArea"].fillna(train_df["MasVnrArea"].mean())
train_df["GarageYrBlt"] = train_df["GarageYrBlt"].fillna(
    train_df["GarageYrBlt"].median()
)
train_df["MasVnrType"] = train_df["MasVnrType"].fillna(train_df["MasVnrType"].mode()[0])
train_df["BsmtQual"] = train_df["BsmtQual"].fillna(train_df["BsmtQual"].mode()[0])
train_df["BsmtCond"] = train_df["BsmtCond"].fillna(train_df["BsmtCond"].mode()[0])
train_df["BsmtExposure"] = train_df["BsmtExposure"].fillna(
    train_df["BsmtExposure"].mode()[0]
)
train_df["BsmtFinType1"] = train_df["BsmtFinType1"].fillna(
    train_df["BsmtFinType1"].mode()[0]
)
train_df["BsmtFinType2"] = train_df["BsmtFinType2"].fillna(
    train_df["BsmtFinType2"].mode()[0]
)
train_df["Electrical"] = train_df["Electrical"].fillna(train_df["Electrical"].mode()[0])
train_df["FireplaceQu"] = train_df["FireplaceQu"].fillna(
    train_df["FireplaceQu"].mode()[0]
)
train_df["GarageType"] = train_df["GarageType"].fillna(train_df["GarageType"].mode()[0]) train_df["GarageFinish"] = train_df["GarageFinish"].fillna( train_df["GarageFinish"].mode()[0] ) train_df["GarageQual"] = train_df["GarageQual"].fillna(train_df["GarageQual"].mode()[0]) train_df["GarageCond"] = train_df["GarageCond"].fillna(train_df["GarageCond"].mode()[0]) train_df.isnull().values.sum() test_df.fillna(test_df.mode().iloc[0], inplace=True) test_df.isnull().values.sum() # ## Scalar Conversion from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split corr = train_df.corr() high_corr_features = corr.index[abs(corr["SalePrice"]) > 0.50] print(f"highly correlated feature:\n", high_corr_features) print(f"No. of highly correlated features:", len(high_corr_features)) X = train_df[high_corr_features.drop("SalePrice")] y = train_df[["SalePrice"]] test_df = test_df[high_corr_features.drop("SalePrice")] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=10 ) print(X_train.shape, X_test.shape) scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.fit_transform(X_test) test_df = scaler.fit_transform(test_df) # ## Linear Regression Model from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(X_train, y_train) print("Coefficients: \n", lr.coef_) pred = lr.predict(X_test) plt.scatter(y_test, pred) plt.xlabel("Y Test") plt.ylabel("Predicted Y") from sklearn import metrics print("MAE:", metrics.mean_absolute_error(y_test, pred)) print("MSE:", metrics.mean_squared_error(y_test, pred)) print("RMSE:", np.sqrt(metrics.mean_squared_error(y_test, pred))) # ## Overall Outcome prediction = lr.predict(test_df) prediction.shape print(prediction) ids = model_test["Id"] print(ids) Final = pd.DataFrame({"Id": ids}) Final = pd.DataFrame({"Sales Price": prediction.flatten()}) Final_sub = pd.DataFrame({"Id": ids, "SalePrice": prediction.flatten()}) Final_sub.head(10) Final_sub.to_csv("submission.csv", index="None")
false
0
1,890
0
1,890
1,890
129066463
<jupyter_start><jupyter_text>Video Game Sales This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1]. Fields include * Rank - Ranking of overall sales * Name - The games name * Platform - Platform of the games release (i.e. PC,PS4, etc.) * Year - Year of the game's release * Genre - Genre of the game * Publisher - Publisher of the game * NA_Sales - Sales in North America (in millions) * EU_Sales - Sales in Europe (in millions) * JP_Sales - Sales in Japan (in millions) * Other_Sales - Sales in the rest of the world (in millions) * Global_Sales - Total worldwide sales. The script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape. It is based on BeautifulSoup using Python. There are 16,598 records. 2 records were dropped due to incomplete information. [1]: http://www.vgchartz.com/ Kaggle dataset identifier: videogamesales <jupyter_code>import pandas as pd df = pd.read_csv('videogamesales/vgsales.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 16598 entries, 0 to 16597 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Rank 16598 non-null int64 1 Name 16598 non-null object 2 Platform 16598 non-null object 3 Year 16327 non-null float64 4 Genre 16598 non-null object 5 Publisher 16540 non-null object 6 NA_Sales 16598 non-null float64 7 EU_Sales 16598 non-null float64 8 JP_Sales 16598 non-null float64 9 Other_Sales 16598 non-null float64 10 Global_Sales 16598 non-null float64 dtypes: float64(6), int64(1), object(4) memory usage: 1.4+ MB <jupyter_text>Examples: { "Rank": 1, "Name": "Wii Sports", "Platform": "Wii", "Year": 2006, "Genre": "Sports", "Publisher": "Nintendo", "NA_Sales": 41.49, "EU_Sales": 29.02, "JP_Sales": 3.77, "Other_Sales": 8.46, "Global_Sales": 82.74 } { "Rank": 2, "Name": "Super Mario Bros.", "Platform": "NES", "Year": 1985, "Genre": "Platform", "Publisher": "Nintendo", "NA_Sales": 29.08, "EU_Sales": 3.58, "JP_Sales": 6.8100000000000005, "Other_Sales": 0.77, "Global_Sales": 40.24 } { "Rank": 3, "Name": "Mario Kart Wii", "Platform": "Wii", "Year": 2008, "Genre": "Racing", "Publisher": "Nintendo", "NA_Sales": 15.85, "EU_Sales": 12.88, "JP_Sales": 3.79, "Other_Sales": 3.31, "Global_Sales": 35.82 } { "Rank": 4, "Name": "Wii Sports Resort", "Platform": "Wii", "Year": 2009, "Genre": "Sports", "Publisher": "Nintendo", "NA_Sales": 15.75, "EU_Sales": 11.01, "JP_Sales": 3.2800000000000002, "Other_Sales": 2.96, "Global_Sales": 33.0 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv") df # ## Which company is the most common video game publisher? df.value_counts("Publisher").idxmax() # ## What’s the most common platform? df.value_counts("Platform").idxmax() # ## What about the most common genre? 
df.value_counts("Genre").idxmax() # ## What are the top 20 highest grossing games? # top_20 = df.nlargest(20, "Global_Sales") top_20 # ## For North American video game sales, what’s the median? median_na_sales = df["NA_Sales"].median() median_na_sales # ### Provide a secondary output showing ten games surrounding the median sales output. median_sales = df["Global_Sales"].median() ten_games_surrounding_median = ( df[df["Global_Sales"].between(median_sales - 0.05, median_sales + 0.05)] .sort_values("Global_Sales", ascending=False) .head(10) ) ten_games_surrounding_median
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/066/129066463.ipynb
videogamesales
gregorut
[{"Id": 129066463, "ScriptId": 38364915, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10647506, "CreationDate": "05/10/2023 17:48:19", "VersionNumber": 1.0, "Title": "vg-stats", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 50.0, "LinesInsertedFromPrevious": 50.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184793832, "KernelVersionId": 129066463, "SourceDatasetVersionId": 618}]
[{"Id": 618, "DatasetId": 284, "DatasourceVersionId": 618, "CreatorUserId": 462330, "LicenseName": "Unknown", "CreationDate": "10/26/2016 09:10:49", "VersionNumber": 2.0, "Title": "Video Game Sales", "Slug": "videogamesales", "Subtitle": "Analyze sales data from more than 16,500 games.", "Description": "This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].\n\nFields include\n\n* Rank - Ranking of overall sales\n\n* Name - The games name\n\n* Platform - Platform of the games release (i.e. PC,PS4, etc.)\n\n* Year - Year of the game's release\n\n* Genre - Genre of the game\n\n* Publisher - Publisher of the game\n\n* NA_Sales - Sales in North America (in millions)\n\n* EU_Sales - Sales in Europe (in millions)\n\n* JP_Sales - Sales in Japan (in millions)\n\n* Other_Sales - Sales in the rest of the world (in millions)\n\n* Global_Sales - Total worldwide sales.\n\nThe script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.\nIt is based on BeautifulSoup using Python.\nThere are 16,598 records. 2 records were dropped due to incomplete information.\n\n\n [1]: http://www.vgchartz.com/", "VersionNotes": "Cleaned up formating", "TotalCompressedBytes": 1355781.0, "TotalUncompressedBytes": 1355781.0}]
[{"Id": 284, "CreatorUserId": 462330, "OwnerUserId": 462330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 618.0, "CurrentDatasourceVersionId": 618.0, "ForumId": 1788, "Type": 2, "CreationDate": "10/26/2016 08:17:30", "LastActivityDate": "02/06/2018", "TotalViews": 1798828, "TotalDownloads": 471172, "TotalVotes": 5485, "TotalKernels": 1480}]
[{"Id": 462330, "UserName": "gregorut", "DisplayName": "GregorySmith", "RegisterDate": "11/09/2015", "PerformanceTier": 1}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv") df # ## Which company is the most common video game publisher? df.value_counts("Publisher").idxmax() # ## What’s the most common platform? df.value_counts("Platform").idxmax() # ## What about the most common genre? df.value_counts("Genre").idxmax() # ## What are the top 20 highest grossing games? # top_20 = df.nlargest(20, "Global_Sales") top_20 # ## For North American video game sales, what’s the median? median_na_sales = df["NA_Sales"].median() median_na_sales # ### Provide a secondary output showing ten games surrounding the median sales output. median_sales = df["Global_Sales"].median() ten_games_surrounding_median = ( df[df["Global_Sales"].between(median_sales - 0.05, median_sales + 0.05)] .sort_values("Global_Sales", ascending=False) .head(10) ) ten_games_surrounding_median
[{"videogamesales/vgsales.csv": {"column_names": "[\"Rank\", \"Name\", \"Platform\", \"Year\", \"Genre\", \"Publisher\", \"NA_Sales\", \"EU_Sales\", \"JP_Sales\", \"Other_Sales\", \"Global_Sales\"]", "column_data_types": "{\"Rank\": \"int64\", \"Name\": \"object\", \"Platform\": \"object\", \"Year\": \"float64\", \"Genre\": \"object\", \"Publisher\": \"object\", \"NA_Sales\": \"float64\", \"EU_Sales\": \"float64\", \"JP_Sales\": \"float64\", \"Other_Sales\": \"float64\", \"Global_Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 16598 entries, 0 to 16597\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Rank 16598 non-null int64 \n 1 Name 16598 non-null object \n 2 Platform 16598 non-null object \n 3 Year 16327 non-null float64\n 4 Genre 16598 non-null object \n 5 Publisher 16540 non-null object \n 6 NA_Sales 16598 non-null float64\n 7 EU_Sales 16598 non-null float64\n 8 JP_Sales 16598 non-null float64\n 9 Other_Sales 16598 non-null float64\n 10 Global_Sales 16598 non-null float64\ndtypes: float64(6), int64(1), object(4)\nmemory usage: 1.4+ MB\n", "summary": "{\"Rank\": {\"count\": 16598.0, \"mean\": 8300.605253645017, \"std\": 4791.853932896403, \"min\": 1.0, \"25%\": 4151.25, \"50%\": 8300.5, \"75%\": 12449.75, \"max\": 16600.0}, \"Year\": {\"count\": 16327.0, \"mean\": 2006.4064433147546, \"std\": 5.828981114712805, \"min\": 1980.0, \"25%\": 2003.0, \"50%\": 2007.0, \"75%\": 2010.0, \"max\": 2020.0}, \"NA_Sales\": {\"count\": 16598.0, \"mean\": 0.26466742981082064, \"std\": 0.8166830292988796, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.08, \"75%\": 0.24, \"max\": 41.49}, \"EU_Sales\": {\"count\": 16598.0, \"mean\": 0.14665200626581515, \"std\": 0.5053512312869116, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.02, \"75%\": 0.11, \"max\": 29.02}, \"JP_Sales\": {\"count\": 16598.0, \"mean\": 0.077781660441017, \"std\": 0.30929064808220297, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.04, \"max\": 10.22}, \"Other_Sales\": {\"count\": 16598.0, \"mean\": 0.0480630196409206, \"std\": 0.18858840291271461, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.01, \"75%\": 0.04, \"max\": 10.57}, \"Global_Sales\": {\"count\": 16598.0, \"mean\": 0.5374406555006628, \"std\": 1.5550279355699124, \"min\": 0.01, \"25%\": 0.06, \"50%\": 0.17, \"75%\": 0.47, \"max\": 82.74}}", "examples": "{\"Rank\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Name\":{\"0\":\"Wii Sports\",\"1\":\"Super Mario Bros.\",\"2\":\"Mario Kart Wii\",\"3\":\"Wii Sports Resort\"},\"Platform\":{\"0\":\"Wii\",\"1\":\"NES\",\"2\":\"Wii\",\"3\":\"Wii\"},\"Year\":{\"0\":2006.0,\"1\":1985.0,\"2\":2008.0,\"3\":2009.0},\"Genre\":{\"0\":\"Sports\",\"1\":\"Platform\",\"2\":\"Racing\",\"3\":\"Sports\"},\"Publisher\":{\"0\":\"Nintendo\",\"1\":\"Nintendo\",\"2\":\"Nintendo\",\"3\":\"Nintendo\"},\"NA_Sales\":{\"0\":41.49,\"1\":29.08,\"2\":15.85,\"3\":15.75},\"EU_Sales\":{\"0\":29.02,\"1\":3.58,\"2\":12.88,\"3\":11.01},\"JP_Sales\":{\"0\":3.77,\"1\":6.81,\"2\":3.79,\"3\":3.28},\"Other_Sales\":{\"0\":8.46,\"1\":0.77,\"2\":3.31,\"3\":2.96},\"Global_Sales\":{\"0\":82.74,\"1\":40.24,\"2\":35.82,\"3\":33.0}}"}}]
true
1
<start_data_description><data_path>videogamesales/vgsales.csv: <column_names> ['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales'] <column_types> {'Rank': 'int64', 'Name': 'object', 'Platform': 'object', 'Year': 'float64', 'Genre': 'object', 'Publisher': 'object', 'NA_Sales': 'float64', 'EU_Sales': 'float64', 'JP_Sales': 'float64', 'Other_Sales': 'float64', 'Global_Sales': 'float64'} <dataframe_Summary> {'Rank': {'count': 16598.0, 'mean': 8300.605253645017, 'std': 4791.853932896403, 'min': 1.0, '25%': 4151.25, '50%': 8300.5, '75%': 12449.75, 'max': 16600.0}, 'Year': {'count': 16327.0, 'mean': 2006.4064433147546, 'std': 5.828981114712805, 'min': 1980.0, '25%': 2003.0, '50%': 2007.0, '75%': 2010.0, 'max': 2020.0}, 'NA_Sales': {'count': 16598.0, 'mean': 0.26466742981082064, 'std': 0.8166830292988796, 'min': 0.0, '25%': 0.0, '50%': 0.08, '75%': 0.24, 'max': 41.49}, 'EU_Sales': {'count': 16598.0, 'mean': 0.14665200626581515, 'std': 0.5053512312869116, 'min': 0.0, '25%': 0.0, '50%': 0.02, '75%': 0.11, 'max': 29.02}, 'JP_Sales': {'count': 16598.0, 'mean': 0.077781660441017, 'std': 0.30929064808220297, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.04, 'max': 10.22}, 'Other_Sales': {'count': 16598.0, 'mean': 0.0480630196409206, 'std': 0.18858840291271461, 'min': 0.0, '25%': 0.0, '50%': 0.01, '75%': 0.04, 'max': 10.57}, 'Global_Sales': {'count': 16598.0, 'mean': 0.5374406555006628, 'std': 1.5550279355699124, 'min': 0.01, '25%': 0.06, '50%': 0.17, '75%': 0.47, 'max': 82.74}} <dataframe_info> RangeIndex: 16598 entries, 0 to 16597 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Rank 16598 non-null int64 1 Name 16598 non-null object 2 Platform 16598 non-null object 3 Year 16327 non-null float64 4 Genre 16598 non-null object 5 Publisher 16540 non-null object 6 NA_Sales 16598 non-null float64 7 EU_Sales 16598 non-null float64 8 JP_Sales 16598 non-null float64 9 Other_Sales 16598 non-null float64 10 Global_Sales 16598 non-null float64 dtypes: float64(6), int64(1), object(4) memory usage: 1.4+ MB <some_examples> {'Rank': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Name': {'0': 'Wii Sports', '1': 'Super Mario Bros.', '2': 'Mario Kart Wii', '3': 'Wii Sports Resort'}, 'Platform': {'0': 'Wii', '1': 'NES', '2': 'Wii', '3': 'Wii'}, 'Year': {'0': 2006.0, '1': 1985.0, '2': 2008.0, '3': 2009.0}, 'Genre': {'0': 'Sports', '1': 'Platform', '2': 'Racing', '3': 'Sports'}, 'Publisher': {'0': 'Nintendo', '1': 'Nintendo', '2': 'Nintendo', '3': 'Nintendo'}, 'NA_Sales': {'0': 41.49, '1': 29.08, '2': 15.85, '3': 15.75}, 'EU_Sales': {'0': 29.02, '1': 3.58, '2': 12.88, '3': 11.01}, 'JP_Sales': {'0': 3.77, '1': 6.81, '2': 3.79, '3': 3.28}, 'Other_Sales': {'0': 8.46, '1': 0.77, '2': 3.31, '3': 2.96}, 'Global_Sales': {'0': 82.74, '1': 40.24, '2': 35.82, '3': 33.0}} <end_description>
464
0
1,578
464
129066401
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from IPython import get_ipython import plotly.express as px import plotly.graph_objects as go import warnings warnings.filterwarnings("ignore") from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import ( RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, ) from xgboost import XGBRegressor from catboost import CatBoostRegressor from lightgbm import LGBMRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score from sklearn.preprocessing import StandardScaler import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) train_df = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv").drop( columns="id" ) train_df.head(5) test_df = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv") test_df.head() train_df.info() train_df.isna().sum() train_df.duplicated().sum() train_df.describe().T train_df.columns for i in train_df.columns: plt.figure(figsize=(13, 7)) sns.histplot(data=train_df[i], kde=True, multiple="stack") plt.xticks(rotation=90) plt.show() sns.boxplot(x="RainingDays", y="fruitset", data=train_df) plt.xlabel("Raining Days") plt.ylabel("Fruitset (%)") plt.title("Raining Days vs Fruitset") plt.show() col = ["clonesize", "honeybee", "bumbles", "andrena", "osmia", "seeds"] fig, axs = plt.subplots(ncols=3, nrows=2, figsize=(10, 4)) axs = axs.flatten() for i, col_name in enumerate(col): sns.boxplot(x=train_df[col_name], ax=axs[i], color="skyblue") sns.stripplot( x=train_df[col_name][train_df[col_name] > train_df[col_name].quantile(0.75)], ax=axs[i], color="red", size=4, ) axs[i].set_title(col_name) if i == len(col) - 1: break fig.tight_layout() # Show the plot plt.show() fig, axs = plt.subplots(ncols=3, nrows=2, figsize=(12, 8)) axs = axs.flatten() for i, col in enumerate(train_df.columns[:-1]): if i < len(axs): sns.violinplot(y=train_df[col], ax=axs[i], color="skyblue") axs[i].set_title(col) for ax in axs[len(train_df.columns) - 1 :]: ax.remove() fig.tight_layout() # Show the plot plt.show() plt.figure(figsize=(20, 12)) sns.heatmap(train_df.corr(), annot=True, cmap="coolwarm") x = train_df.drop(columns=["yield"]) y = train_df["yield"] from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.33, random_state=42 ) print("x_train - > ", x_train.shape) print("x_test - > ", x_test.shape) print("y_train - > ", y_train.shape) print("y_test - > ", y_test.shape) training_score = [] testing_score = [] def model_prediction(model): model.fit(x_train, y_train) x_train_pred = model.predict(x_train) x_test_pred = model.predict(x_test) a = r2_score(y_train, x_train_pred) * 100 b = r2_score(y_test, x_test_pred) * 100 training_score.append(a) testing_score.append(b) print(f"r2_Score of {model} model on Training Data is:", a) print(f"r2_Score of {model} model on Testing Data is:", b) model_prediction(LinearRegression()) model_prediction(DecisionTreeRegressor()) model_prediction(RandomForestRegressor()) model_prediction(AdaBoostRegressor()) model_prediction(GradientBoostingRegressor()) model_prediction(LGBMRegressor()) model_prediction(XGBRegressor()) model_prediction(CatBoostRegressor(verbose=False)) models = [ "Linear Regression", "Decision Tree", "Random Forest", "Ada Boost", "Gradient Boost", "LGBM", "XGBoost", "CatBoost", ] df = 
pd.DataFrame( { "Algorithms": models, "Training Score": training_score, "Testing Score": testing_score, } ) df df.plot( x="Algorithms", y=["Training Score", "Testing Score"], figsize=(16, 6), kind="bar", title="Performance Visualization of Different Models", colormap="Set1", ) plt.show() from sklearn.metrics import mean_absolute_error # Instantiate the model lgbm_model = LGBMRegressor() # Fit the model to the training data lgbm_model.fit(x_train, y_train) # Use the model to make predictions on the test data y_pred = lgbm_model.predict(x_test) # Calculate the mean absolute error of the model score = mean_absolute_error(y_test, y_pred) score from sklearn.metrics import mean_absolute_error # Instantiate the model gb_model = GradientBoostingRegressor() # Fit the model to the training data gb_model.fit(x_train, y_train) # Use the model to make predictions on the test data gb_y_pred = gb_model.predict(x_test) # Calculate the mean absolute error of the model score = mean_absolute_error(y_test, gb_y_pred) score sample = pd.read_csv("/kaggle/input/playground-series-s3e14/sample_submission.csv") sample.head() prediction = gb_model.predict(test_df.drop("id", axis=1)) prediction submission = pd.DataFrame({"id": test_df.id, "yield": prediction}) submission.head() submission.to_csv("submission.csv", index=False)
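The comparison above relies on a single train/test split, so the ranking of models can vary with the random seed. A minimal sketch of the same comparison using 5-fold cross-validated MAE (assuming the x and y frames defined earlier in this notebook) could look like this:

from sklearn.model_selection import cross_val_score
from sklearn.ensemble import GradientBoostingRegressor
from lightgbm import LGBMRegressor

# Scores are negated because scikit-learn maximizes, so MAE is exposed as
# "neg_mean_absolute_error"; x and y are the feature matrix and target built above.
for name, model in [
    ("GradientBoosting", GradientBoostingRegressor()),
    ("LGBM", LGBMRegressor()),
]:
    scores = -cross_val_score(model, x, y, cv=5, scoring="neg_mean_absolute_error")
    print(f"{name}: MAE = {scores.mean():.2f} +/- {scores.std():.2f}")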
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/066/129066401.ipynb
null
null
[{"Id": 129066401, "ScriptId": 38364914, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7511659, "CreationDate": "05/10/2023 17:47:42", "VersionNumber": 1.0, "Title": "Prediction of\ud83e\uded0Blueberry Yield", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 192.0, "LinesInsertedFromPrevious": 192.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 6}]
null
null
null
null
false
0
1,690
6
1,690
1,690
129066768
# *** # ## **Collaborative filtering recommender system** # *** # This notebook summarizes the results of a collaborative filtering recommender system implemented with Spark MLlib: how well it scales and performs (at generating relevant user recommendations) on the MovieLens dataset. # # **Configuration** # **Installing Apache Spark** # import os # The following installs Apache Spark (and its dependencies, such as Java JDK 8) for use in Google Colab. # Java JDK # Apache Spark # Switch the version from JDK-11 to JDK-8 # Set the environment variables os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["SPARK_HOME"] = "/content/spark-3.2.4-bin-hadoop2.7" # **Starting the Spark session** import findspark findspark.init() import pyspark from pyspark.sql import SQLContext sc = pyspark.SparkContext(appName="sd701-RecoSys-Models") sqlContext = SQLContext(sc) print("Master : ", sc.master) print("Cores : ", sc.defaultParallelism) sqlContext.sparkSession.conf.set("spark.sql.autoBroadcastJoinThreshold", -1) # **Packages** # Default packages import math import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.ticker as ticker import seaborn as sns import random from pprint import pprint from matplotlib.lines import Line2D # PySpark utilities import pyspark.sql.functions as F from pyspark.sql.types import * from pyspark.ml.recommendation import ALS, ALSModel from pyspark.ml.evaluation import RegressionEvaluator, BinaryClassificationEvaluator from pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit from pyspark.mllib.evaluation import RegressionMetrics, RankingMetrics SEED = 1492 plt.style.use("seaborn") # **Mounting Google Drive into the file system** from google.colab import drive drive.mount("/content/drive") # Check the files # Location of the data directory DATA_PATH = "drive/My Drive/MovieLens/ml-latest/" # Location of the results directory RESULTS_PATH = "drive/My Drive/MovieLens/" # # **1. Data preprocessing** # *** # The following steps are performed: # - Convert several string columns to integer columns (required for model training) # - For evaluating ranking accuracy: any movie rated above 3 is treated as relevant to the user (and thus gets a score of 1); otherwise it gets a value of 0. 
class MovieLensDatasets(object): """ Class for loading and preprocessing the dataset """ def __init__(self, ratings, movies, links, debug=True, debugLimit=10000): # Loading if debug: debugLimit = debugLimit ratings = ratings.limit(debugLimit) else: ratings = ratings self.ratings = ratings self.movies = movies self.links = links # New dataframe users = ratings.select("userId").distinct() self.users = users def preprocessing(self): # Preprocess the ratings self.ratings = ( self.ratings.withColumn("rating", F.col("rating").cast("double")) .drop("timestamp") .withColumn("userId", F.col("userId").cast("int")) .withColumn("movieId", F.col("movieId").cast("int")) ) # Binarize the MovieLens ratings (1.0 if the rating is >= 3.0, else 0.0) udf_scale_ratings = F.udf(lambda x: x - 2.5, DoubleType()) udf_binary_ratings = F.udf(lambda x: 1.0 if x > 0.0 else 0.0, DoubleType()) self.ratings = self.ratings.withColumn( "ratingsScaled", udf_scale_ratings(F.col("rating")) ).withColumn("ratingsBinary", udf_binary_ratings(F.col("ratingsScaled"))) def get_ratings(self): return self.ratings def get_movies(self): return self.movies # Display null values def spark_df_display_null_values(sparkDf): print("NaN values ?") sparkDf.select( [F.count(F.when(F.isnan(c), c)).alias(c) for c in sparkDf.columns] ).show() print("Null values ?") sparkDf.select( [F.count(F.when(F.isnull(c), c)).alias(c) for c in sparkDf.columns] ).show() # Load the data: debug = False movies = ( sqlContext.read.format("csv") .option("header", "true") .load(DATA_PATH + "movies.csv") ) links = ( sqlContext.read.format("csv").option("header", "true").load(DATA_PATH + "links.csv") ) ratings = ( sqlContext.read.format("csv") .option("header", "true") .load(DATA_PATH + "ratings.csv") ) movieLensDatasets = MovieLensDatasets( ratings=ratings, movies=movies, links=links, debug=debug ) movieLensDatasets.preprocessing() dfRatings = movieLensDatasets.get_ratings() dfMovies = movieLensDatasets.get_movies() # # **2. Model training** # *** # ## **2.1. Train/test split** ratingsPrepare = dfRatings.withColumn("userId", F.col("userId").cast("int")).withColumn( "movieId", F.col("movieId").cast("int") ) dfRatingsTrain, dfRatingsTest = ratingsPrepare.randomSplit([0.8, 0.2], seed=SEED) # ## **2.2. Metrics** # **Explanation** # The following metrics, as implemented in Spark MLlib, only work if you treat the recommendation problem as binary: either the algorithm recommended a relevant item or it did not. This is done through a relevance function. # Suppose a user has a list of favourite movies $D = \{ d_1, ..., d_{N} \}$ (we consider any movie rated above 3 to be relevant). Our recommender system has now generated a list of $Q$ movie items $Z=\{z_1, ..., z_{Q}\}$. We consider an item $z$ relevant if: # $$ rel_D(z) = \begin{cases} # 1 & \text{if } z \in D \\ # 0 & \text{else} # \end{cases} $$ # ##### **Precision@$k$** # Measures (averaged over all users) how many of the $k$ items recommended to a user are relevant to that user (given the user's past ratings of movies they liked or disliked). The actual ranking of the recommended items is not taken into account. 
# $$ p(k) = \frac{1}{M} \sum_{i=0}^{M-1} \frac{1}{k} \sum_{j=0}^{\min(|D|,k)-1} rel_{D_i}(Z_i(j)) $$ # ##### **Normalized Discounted Cumulative Gain** # Similar to precision, except that this time the actual order of the recommended items is taken into account. # $$ NDCG(k) = \frac{1}{M} \sum_{i=0}^{M-1} \frac{1}{IDCG(D_i,k)} \sum_{j=0}^{n-1} \frac{rel_{D_i}(Z_i(j))}{\ln(j+2)}$$ # $$ n = \min(\max(|Z_i|, |D_i|), k), \qquad IDCG(D,k) = \sum_{j=0}^{\min(|D|,k)-1} \frac{1}{\ln(j+2)} $$ # ##### **RMSE** # RMSE is only relevant for collaborative filtering with latent-factor models (since such a model tries to reconstruct the ratings and uses them as the cost function during optimization). # $$ RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^{N} (y_i - \hat y_i )^2} $$ # ##### **Catalog Coverage** # Measures the percentage of movies that are recommended to at least one user. Essentially, it tells how many items out of the entire catalogue of 58,000 movies are actually recommended to the $m$ users by the algorithm. There is a trade-off between producing relevant recommendations and covering a larger share of the available item set. # $$ CC = \frac{|\cup_{u=1}^m T_u|}{n} $$ # ### Accuracy metrics def catalog_coverage(predicted, catalog, k): sampling = random.choices(predicted, k=k) predicted_flattened = [p for sublist in sampling for p in sublist] L_predictions = len(set(predicted_flattened)) catalog_coverage = L_predictions / (len(catalog) * 1.0) return catalog_coverage def get_rec_sys_results(relevantDocumentsDf, dfMovies): """ Returns a dictionary of recommender-system metrics """ # Convert the Spark DataFrame to a Spark RDD relevantDocuments = relevantDocumentsDf.rdd.map( lambda row: (row.predictions, row.groundTruth) ) # Get Catalog Coverage moviesCatalog = dfMovies.select("movieId").orderBy("movieId").distinct().toPandas() predictions = relevantDocumentsDf.select("predictions").toPandas() predictionsList = predictions.values.tolist() predictionsList = [sublist[0] for sublist in predictionsList] cc = catalog_coverage(predicted=predictionsList, catalog=moviesCatalog, k=100) # Results metrics = RankingMetrics(relevantDocuments) pk20 = metrics.precisionAt(20) ndcg20 = metrics.ndcgAt(20) results = dict(Pk20=pk20, NDCGk20=ndcg20, CC=cc) return results def format_recommendations(rowPreds): rowPredsList = [row.movieId for row in rowPreds] return rowPredsList udf_format_recommendations = F.udf( lambda x: format_recommendations(x), ArrayType(IntegerType()) ) # ## **2.3. Model-based collaborative filtering** # **Collaborative filtering** # This approach generates recommendations for a user by finding other users with similar tastes. CF models are therefore completely independent of item features. Such recommendations are likely to reflect more diverse tastes and let a user step outside their usual preferences. However, little-known movies are likely to be ignored. # **Matrix factorization** # There are a number of techniques for implementing CF models. One of them is matrix factorization. Suppose we express our user data as a user-item rating matrix $R\in\mathbb{R}^{m\times n}$ with $m$ users and $n$ items (here, movies), where $R_{i,j}$ denotes user $i$'s rating for movie $j$. 
We want to express this matrix $R$ as a product of two lower-dimensional matrices $U\in\mathbb{R}^{m\times d}$ and $V\in\mathbb{R}^{n\times d}$, a more informative latent structure hidden in the data (e.g. large-scale identification of user similarity) for our recommender system. To find this latent structure, we need to minimize the following criterion: # $$ \min_{U\in\mathbb{R}^{m \times d},V\in\mathbb{R}^{n\times d}} \sum_{i,j} (R_{i,j} - \langle U_i,V_j \rangle)^2$$ # One of the main problems of collaborative filtering is data sparsity: the average viewer watches only a small number of movies from the available pool. As a result, the rating matrix $R$ is full of missing values. # **Alternating Least Squares** # This is in fact a well-studied problem in the machine learning research community. A very popular approach is *Alternating Least Squares with Weighted Regularization* (ALS-WR). In brief, it: # - Can easily be distributed (similarly to *Stochastic Gradient Descent*) # - Prevents overfitting via Tikhonov regularization # - Can easily ignore missing entries (unlike SGD) # - Importantly, shows faster convergence than SGD # $$ \min_{U\in\mathbb{R}^{m \times d},V\in\mathbb{R}^{n\times d}} \sum_{(i,j)|R_{i,j}\neq 0} (R_{i,j} - \langle U_i,V_j \rangle)^2 + \lambda \cdot\Bigl(\sum_i n_{u_i}\| U_i \|^2 + \sum_j n_{v_j}\| V_j \|^2\Bigr) $$ # ### **a) Model training** # tempALS = ALS( maxIter=10, rank=10, regParam=0.1, nonnegative=True, userCol="userId", itemCol="movieId", ratingCol="rating", coldStartStrategy="drop", implicitPrefs=False, seed=SEED, ) mlALSFitted = tempALS.fit(dfRatingsTrain) mlALSFitted.save(RESULTS_PATH + "RESULT") mlALSFitted = ALSModel.load(RESULTS_PATH + "RESULT") # ### **b) RMSE** # **RMSE on the test set** predictions = mlALSFitted.transform(dfRatingsTest) evaluator = RegressionEvaluator( metricName="rmse", labelCol="rating", predictionCol="prediction" ) rmse = evaluator.evaluate(predictions) print("RMSE (Test Set):", rmse) # ### **c) Evaluating the recommendations** resultsALS = mlALSFitted.recommendForAllUsers(20) resultsALS = resultsALS.withColumn( "recommendations", udf_format_recommendations(F.col("recommendations")) ).toDF("userId", "predictions") # The most frequently recommended movies (for the test set): resultsALSExpanded = ( resultsALS.withColumn("movieId", F.explode("predictions")) .drop("predictions") .join(dfMovies, "movieId") ) resultsALSKdf = resultsALSExpanded.to_koalas() MostRecommendedMoviesForAllUsers = resultsALSKdf.groupby(["movieId", "title"])[ "userId" ].count() MostRecommendedMoviesForAllUsers = MostRecommendedMoviesForAllUsers.sort_values( ascending=False ) MostRecommendedMoviesForAllUsers.head(20) # Let's check whether the CF recommendations match each user's tastes: resultsALS = resultsALS.join( dfRatingsTest.filter(F.col("ratingsBinary") == 1.0) .withColumn("movieId", F.col("movieId").cast("int")) .groupby("userId") .agg(F.collect_list("movieId").alias("groundTruth")), "userId", ) # resultsALS.orderBy('userId').limit(10).show(10) resultsALSMetrics = get_rec_sys_results(resultsALS, dfMovies) pprint(resultsALSMetrics)
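As a sanity check on the ranking metrics above, here is a small pure-Python sketch (independent of Spark) that follows the precision@k and NDCG@k formulas as written; the toy recommendation and ground-truth lists are made up for illustration.

import math

def precision_at_k(recommended, relevant, k):
    # Fraction of the top-k recommended items that appear in the relevant set.
    rel = set(relevant)
    return sum(1 for item in recommended[:k] if item in rel) / k

def ndcg_at_k(recommended, relevant, k):
    # NDCG@k with the 1/ln(j+2) discount used in the formulas above.
    rel = set(relevant)
    n = min(max(len(recommended), len(relevant)), k)
    dcg = sum(
        1.0 / math.log(j + 2)
        for j in range(min(len(recommended), n))
        if recommended[j] in rel
    )
    idcg = sum(1.0 / math.log(j + 2) for j in range(min(len(relevant), k)))
    return dcg / idcg if idcg > 0 else 0.0

# Toy example: movie ids recommended to one user vs. the movies they actually liked.
recs = [10, 42, 7, 99, 3]
liked = [42, 3, 55]
print(precision_at_k(recs, liked, k=5))  # 2 hits out of 5 -> 0.4
print(ndcg_at_k(recs, liked, k=5))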
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/066/129066768.ipynb
null
null
[{"Id": 129066768, "ScriptId": 38367813, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14627050, "CreationDate": "05/10/2023 17:51:27", "VersionNumber": 1.0, "Title": "VK Intership", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 390.0, "LinesInsertedFromPrevious": 390.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
false
0
4,878
1
4,878
4,878
129066297
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) import nltk import nltk.corpus from nltk.tokenize import RegexpTokenizer import dask import datetime from dask import bag as db from dask import dataframe as dd # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import json a = db.read_text("/kaggle/input/tweets-1504/tweets1504.txt", blocksize="50MB") # strips the last two characters of every line in the text document so that the remainder parses as JSON # json.dumps makes a JSON string out of a Python object # json.loads makes a Python object out of a JSON string # sort_keys=True as an argument to dumps sorts the keys alphabetically. The keys are the column names. def cleaner(a): return a[0:-2] # map applies the "cleaner" function defined above to every element b = a.map(cleaner).map(json.loads).to_dataframe() b.set_index("tweet_id").to_parquet("parquet") df = dd.read_parquet("parquet") df.head(5)
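To illustrate what the cleaning step does to one raw line before json.loads parses it, here is a hypothetical example; the exact line format of tweets1504.txt (a JSON object followed by a trailing comma and newline) is an assumption.

import json

# Hypothetical raw line: a JSON object followed by "," and a newline character.
raw_line = '{"tweet_id": 1, "text": "hello world"},\n'

# cleaner() drops the last two characters, leaving a valid JSON string.
cleaned = raw_line[0:-2]
record = json.loads(cleaned)
print(record["tweet_id"], record["text"])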
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/066/129066297.ipynb
null
null
[{"Id": 129066297, "ScriptId": 38366418, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9196404, "CreationDate": "05/10/2023 17:46:36", "VersionNumber": 1.0, "Title": "notebookc1bbcf8636", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 49.0, "LinesInsertedFromPrevious": 49.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
false
0
427
1
427
427
129066393
<jupyter_start><jupyter_text>Airbnb Prices in European Cities _____ # Airbnb Prices in European Cities ### Determinants of Price by Room Type, Location, Cleanliness Rating, and More By [[source]](https://zenodo.org/record/4446043#.Y9Y9ENJBwUE) _____ ### About this dataset > This dataset provides a comprehensive look at Airbnb prices in some of the most popular European cities. Each listing is evaluated for various attributes such as room types, cleanliness and satisfaction ratings, bedrooms, distance from the city centre, and more to capture an in-depth understanding of Airbnb prices on both weekdays and weekends. Using spatial econometric methods, we analyse and identify the determinants of Airbnb prices across these cities. Our dataset includes information such as realSum (the total price of the listing), room_type (private/shared/entire home/apt), host_is_superhost (boolean value indicating if host is a superhost or not), multi (indicator whether listing is for multiple rooms or not), biz (business indicator), guest_satisfaction_overall (overall rating from guests comparing all listings offered by host), bedrooms, dist (distance from city center), lng & lat coordinates for location identification etc. We hope that this data set offers insight into how global markets are affected by social dynamics and geographical factors which in turn determine pricing strategies for optimal profitability! ### How to use the dataset > This dataset can be used by individuals and companies to gain insight into the cost of Airbnb listings in some of the most popular European cities. It contains information on a variety of attributes such as room type, cleanliness rating, guest satisfaction, distance from the city centre, and more. In addition to exploring general trends in prices across Europe, this dataset can be used for deeper spatial econometric analysis. > > To begin using this dataset for your own research or analysis project: > - Download the files which contain both weekday and weekend listings data for European cities. > - Familiarize yourself with the columns included in each file; these provide descriptions of various attributes associated with each listing. > - Calculate any desired summary statistics - average price per night per city or room type etc. - using statistical software (e.g. Excel). > - Perform spatial econometric analysis if desired; use specialized packages such as `spdep` or `spatialreg` in R to identify determinants of Airbnb price levels across Europe (e.g., metro distance). > - Visualize your results with GIS software if necessary to more easily understand patterns between variables like proximity/location and price level (e.g., QGIS). > > By leveraging both descriptive and inferential methods while taking advantage of geographic information systems (GIS), users can apply this dataset to many interesting questions related to rental prices on Airbnb in Europe! ### Research Ideas > - Analyzing spatial trends in Airbnb prices across Europe and finding the most favorable cities for hosting. > - Comparing differences between weekday vs weekend booking patterns to project rental rates for vacationers and business travelers in European cities. 
> - Using spatial econometrics methods to find important determinants of Airbnb prices in order to provide insights into areas of opportunity for improvement, or assess the effectiveness of existing policy changes concerning vacation rentals Kaggle dataset identifier: airbnb-prices-in-european-cities <jupyter_script># # EARIN Project - G27 (Marta Tolsà & Thomas Jost) # ## Preliminary Documentation # ### A short description of the algorithms that will be used, along with some examples. # **1. Spatial Analysis:** # - **Spatial clustering:** this algorithm can be used to identify spatial clusters of similar Airbnb listings across Europe. It helps in finding regions or cities with comparable pricing patterns. # - **Spatial interpolation:** this algorithm can be applied to estimate Airbnb prices in areas where data is missing or sparse. It helps in creating a comprehensive price map. # - **Hotspot analysis:** this algorithm can identify hotspots of high or low Airbnb prices. It helps in identifying cities or regions with exceptionally high or low prices compared to the average. # **2. Time Series Analysis:** # - **Seasonal decomposition:** this method can be used to decompose the time series data of Airbnb prices into trend, seasonal, and residual components. It helps in understanding the seasonal patterns and trends in rental rates. # - **ARIMA (AutoRegressive Integrated Moving Average):** this algorithm can be employed to forecast future rental rates based on historical patterns. We can take into account the autoregressive and moving average components of the time series data. # - **Time series regression:** this method can be used to analyze the relationship between Airbnb prices and factors such as weekdays/weekends, holidays, or other relevant variables. It helps in understanding the differences in booking patterns and their impact on rental rates. # **3. Spatial Econometrics:** # - **Spatial regression models:** these models can be used to examine the relationship between Airbnb prices and various determinants such as location, amenities, neighborhood characteristics, and accessibility to amenities or transportation. Examples of these models include Spatial Lag Model (SLM) and Spatial Error Model (SEM). # - **Spatial Durbin Model:** this model can be used to capture spatial dependencies among the determinants and Airbnb prices, considering both direct and indirect spatial effects. # - **Geographically Weighted Regression (GWR):** this method can be applied to explore spatially varying relationships between Airbnb prices and determinants. It helps in identifying local variations and understanding the heterogeneity of price determinants across different areas. # To sum up: # - We could analyze spatial trends in Airbnb prices across Europe and find the most favorable cities for hosting. # - We could compare differences between weekday vs weekend booking patterns to project rental rates for vacationers and business travelers in European cities. # - We can use spatial econometrics methods to find important determinants of Airbnb prices in order to provide insights into areas of opportunity for improvement, or assess the effectiveness of existing policy changes concerning vacation rentals. # ### Selection and description of the datasets. # This dataset provides a comprehensive look at Airbnb prices in some of the most popular European cities. 
Each listing is evaluated for various attributes such as room types, cleanliness and satisfaction ratings, bedrooms, distance from the city centre, and more to capture an in-depth understanding of Airbnb prices on both weekdays and weekends. # Using spatial econometric methods, we analyse and identify the determinants of Airbnb prices across these cities. # Our dataset includes information such as: # - **realSum** (the total price of the Airbnb listing) # - **room_type** (the type of room available in the listing: "private", "shared", "entire home/apt", indicating whether guests have exclusive access to the entire property or if they will be sharing the space with others) # - **room_shared** (binary indicator (0/1) that specifies whether the room is shared with other guests) # - **room_private** (binary indicator (0/1) that specifies whether the room is private, meaning it is not shared with other guests) # - **person_capacity** (the maximum number of people that can be accommodated in the listing) # - **host_is_superhost** (boolean value indicating if the host is a superhost or not. Superhosts are recognized for providing exceptional experiences to guests) # - **multi** (indicator (0/1) of whether the listing is for multiple rooms or not) # - **biz** (business indicator, providing information about whether the listing is primarily intended for business purposes) # - **cleanliness_rating** (the satisfaction of previous guests with the cleanliness of the accommodation) # - **guest_satisfaction_overall** (the overall rating provided by guests, comparing all the listings offered by the host. It captures the overall satisfaction level of guests) # - **bedrooms** (number of bedrooms available in the listing) # - **dist** (distance from the city center. It provides information on the proximity of the accommodation to the central area of the city) # - **metro_dist** (distance from the nearest metro station, providing insights into the accessibility of public transportation) # - **attr_index** (index or score related to the attractiveness of the location or surrounding area) # - **attr_index_norm** (normalized version of the attr_index, which standardizes the values for easier comparison) # - **rest_index** (index or score related to the availability of restaurants or dining options in the vicinity of the listing) # - **rest_index_norm** (normalized version of the rest_index) # - **lng** (the longitude coordinates for location identification) # - **lat** (the latitude coordinates for location identification) # This data set could offer insight into how global markets are affected by social dynamics and geographical factors which in turn determine pricing strategies for optimal profitability. # ### General plan of tests/experiments. # The general plan of tests/experiments could be as follows: # **1. Data cleaning and preprocessing:** Before conducting any analysis, we ensure that the downloaded dataset is clean and free from any inconsistencies or missing values. We perform data cleaning and preprocessing steps, such as removing duplicates, handling missing data, and standardizing variables, to ensure the quality of the data. # In our case, first of all we download the files which contain both weekday and weekend listings data for European cities. # And we also familiarize ourselves with the columns included in each file; these provide descriptions of various attributes associated with each listing. # **2. Hypothesis formulation:** we clearly define the research questions and hypotheses that we want to investigate. 
This will help guide our analysis and ensure that we are addressing specific objectives, such as identifying the most important determinants of Airbnb prices or understanding spatial trends. # **3. Feature engineering:** we explore additional features or variables that can enhance our analysis. For example, we could calculate additional metrics like average ratings per neighborhood, distance to popular landmarks, or availability of amenities. These additional features can provide valuable insights into the factors influencing Airbnb prices. # In our case, we could calculate any desired summary statistics - average price per night per city or room type etc. - using statistical software (e.g Excel). # **4. Comparative analysis:** we consider conducting a comparative analysis between different cities or regions within Europe. This can help identify variations in pricing patterns, factors driving prices, and opportunities for targeting specific markets. # **5. Robust statistical methods:** we utilize advanced statistical methods that are suitable for analyzing spatial and temporal data. For example, we consider using geospatial regression models, time series models with spatial dependencies, or machine learning algorithms for predictive analysis. # We can perform spatial econometric analysis if desired; use specialized packages such as spdep or spatialreg in R to identify determinants of Airbnb price levels across Europe (e.g., metro distance). # **6. Sensitivity analysis:** we perform sensitivity analysis to assess the stability and reliability of our findings. We test the robustness of our results by varying assumptions or including different variables to ensure the consistency and validity of our conclusions. # We could visualize the results with GIS software if necessary to more easily understand patterns between variables like proximity/location and price level (e.g., QGIS). # **7. Incorporate feedback and iterations:** we seek feedback from domain experts or peers to validate our approach and interpretation of the results. We could incorporate feedback and iterate on our analysis to refine our methodology and ensure the accuracy and reliability of our findings. # This plan of tests and experiments will help understand the spatial patterns and determinants of Airbnb prices in European cities, enabling the identification of key factors that influence prices and informed decision-making to optimize profitability. # ### Methods of result visualization. # Several options to visualize the results of our dataset are: # - **Scatter plots:** we can use scatter plots to show the relationship between two variables, such as price and distance from the city center. Each point in the plot represents an Airbnb listing, and its position on the graph indicates the values of both variables. This will allow us to identify visual patterns or trends. # - **Heatmaps:** Heatmaps are useful for visualizing the spatial distribution of Airbnb prices in European cities. We can use colors to represent different price ranges and overlay them on a geographical map. This will help us identify areas with higher or lower prices. # - **Bar charts or box plots:** These charts are suitable for comparing average Airbnb prices between different cities or room types. We can display the average prices in bars or boxes, allowing us to identify price differences among different categories. # - **Line charts:** We can use line charts to visualize temporal trends in Airbnb prices. 
We can plot the average price over time for different cities or compare prices of different room types over time. # - **Thematic maps:** we can use colors or shades to represent different price ranges on the map. This will facilitate the identification of cities with higher or lower prices. # - **Network graphs:** If we want to visualize the relationship between different variables, such as guest satisfaction and the number of rooms, we can use network graphs. These graphs represent variables as interconnected nodes, allowing us to see connections and relationships between them. # We will choose visualization methods that best suit our data and the objectives of our analysis. And we will use clear colors, labels, and legends to facilitate the interpretation of graphs and maps. # ### Definition of quality measures that will be used. # We can consider following quality measures to assess the accuracy, reliability, and overall quality of the data: # **Data Completeness:** This measure evaluates the extent to which the dataset is complete, meaning it has minimal missing values or gaps. We will calculate the percentage of missing values for each variable to identify areas of data incompleteness. # **Data Consistency:** This measure assesses the consistency of the data across different variables. We will check for any inconsistencies or contradictions in the data, such as contradictory values or illogical relationships between variables. # **Data Accuracy:** This measure determines the accuracy of the data by comparing it with reliable external sources or ground truth information. We can cross-reference Airbnb prices with other sources or validate the data through expert opinions or surveys. # **Outlier Detection:** We can identify outliers, which are extreme values that deviate significantly from the typical pattern of the data. Detecting outliers helps identify potential errors or anomalies in the dataset. # **Data Validity:** We can ensure that the data aligns with the intended research scope and objectives. We will validate that the dataset includes relevant variables and attributes that accurately capture the factors influencing Airbnb prices. # **Data Reliability:** We can assess the reliability of the data collection process and the sources from which the data was obtained. We will consider factors such as data collection methodology, sampling techniques, and the reputation of the data sources. # **Data Relevance:** We could evaluate the relevance of the data to our research questions and objectives. And also, we can determine whether the dataset adequately represents the diversity of European cities and captures the necessary variables for our analysis. # **Data Quality Documentation:** We will maintain comprehensive documentation that describes the data collection process, data cleaning procedures, and any assumptions or limitations associated with the dataset. This documentation ensures transparency and reproducibility of the analysis. # By applying these quality measures to our dataset, we can ensure that our analysis is based on reliable and accurate data, leading to more robust insights and informed decision-making. # **IMPORT NECESSARY LIBRARIES** import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import math import xgboost as xgb from xgboost import XGBRegressor from sklearn.preprocessing import StandardScaler, PolynomialFeatures from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.feature_selection import SequentialFeatureSelector, SelectFromModel from sklearn.linear_model import LinearRegression, Ridge, Lasso from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error, r2_score import warnings # **IMPORT DATASETS** amsterdam_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/amsterdam_weekdays.csv" ) amsterdam_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/amsterdam_weekends.csv" ) athens_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/athens_weekdays.csv" ) athens_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/athens_weekends.csv" ) barcelona_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/barcelona_weekdays.csv" ) barcelona_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/barcelona_weekends.csv" ) berlin_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/berlin_weekdays.csv" ) berlin_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/berlin_weekends.csv" ) budapest_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/budapest_weekdays.csv" ) budapest_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/budapest_weekends.csv" ) lisbon_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/lisbon_weekdays.csv" ) lisbon_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/lisbon_weekends.csv" ) london_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/london_weekdays.csv" ) london_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/london_weekends.csv" ) paris_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/paris_weekdays.csv" ) paris_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/paris_weekends.csv" ) rome_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/rome_weekdays.csv" ) rome_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/rome_weekends.csv" ) vienna_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/vienna_weekdays.csv" ) vienna_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/vienna_weekends.csv" ) # **DIMENSIONS OF DATASETS** print("amsterdam_weekdays = ", amsterdam_weekdays.shape) print("amsterdam_weekends = ", amsterdam_weekends.shape) print("athens_weekdays = ", athens_weekdays.shape) print("athens_weekends = ", athens_weekends.shape) print("barcelona_weekdays = ", barcelona_weekdays.shape) print("barcelona_weekends = ", barcelona_weekends.shape) print("berlin_weekdays = ", berlin_weekdays.shape) print("berlin_weekends = ", berlin_weekends.shape) print("budapest_weekdays = ", budapest_weekdays.shape) print("budapest_weekends = ", budapest_weekends.shape) print("lisbon_weekdays = ", lisbon_weekdays.shape) print("lisbon_weekends 
= ", lisbon_weekends.shape) print("london_weekdays = ", london_weekdays.shape) print("london_weekends = ", london_weekends.shape) print("paris_weekdays = ", paris_weekdays.shape) print("paris_weekends = ", paris_weekends.shape) print("rome_weekdays = ", rome_weekdays.shape) print("rome_weekends = ", rome_weekends.shape) print("vienna_weekdays = ", vienna_weekdays.shape) print("vienna_weekends = ", vienna_weekends.shape) # **FEATURES OF THE DATASET** print(amsterdam_weekdays.columns) print(amsterdam_weekends.columns) print(athens_weekdays.columns) print(athens_weekends.columns) print(barcelona_weekdays.columns) print(barcelona_weekends.columns) print(berlin_weekdays.columns) print(berlin_weekends.columns) print(budapest_weekdays.columns) print(budapest_weekends.columns) print(lisbon_weekdays.columns) print(lisbon_weekends.columns) print(london_weekdays.columns) print(london_weekends.columns) print(paris_weekdays.columns) print(paris_weekends.columns) print(rome_weekdays.columns) print(rome_weekends.columns) print(vienna_weekdays.columns) print(vienna_weekends.columns) # We can see that the number of features in all files are the same; however, the number of records is different. # If we observe the list of feature names, we can see that all datasets have the same number, as well as the same features. So, they can get together in order to convert all these different datasets into a single dataset. # **COMBINE ALL DATASETS INTO A SINGLE ONE** # def combine(csv_1, col_1, csv_2, col_2, city): # We combine the "weekdays" and "weekends" datasets of the individual datasets into 1 datset for a particular city. csv_1["week time"] = col_1 csv_2["week time"] = col_2 # We remove the "Unnamed: 0" feature since it's the index number of the records and isn't useful csv_1.drop(columns=["Unnamed: 0"], inplace=True) csv_2.drop(columns=["Unnamed: 0"], inplace=True) merged = pd.concat([csv_1, csv_2]) # We add the name of the city and we put it into a new column "city", since we will be combining all these cities datasets and we would need the differentiate the data in some way for analysis and insights merged["city"] = city return merged amsterdam = combine( amsterdam_weekdays, "weekdays", amsterdam_weekends, "weekends", "amsterdam" ) athens = combine(athens_weekdays, "weekdays", athens_weekends, "weekends", "athens") barcelona = combine( barcelona_weekdays, "weekdays", barcelona_weekends, "weekends", "barcelona" ) berlin = combine(berlin_weekdays, "weekdays", berlin_weekends, "weekends", "berlin") budapest = combine( budapest_weekdays, "weekdays", budapest_weekends, "weekends", "budapest" ) lisbon = combine(lisbon_weekdays, "weekdays", lisbon_weekends, "weekends", "lisbon") london = combine(london_weekdays, "weekdays", london_weekends, "weekends", "london") paris = combine(paris_weekdays, "weekdays", paris_weekends, "weekends", "paris") rome = combine(rome_weekdays, "weekdays", rome_weekends, "weekends", "rome") vienna = combine(vienna_weekdays, "weekdays", vienna_weekends, "weekends", "vienna") cities_names = [ "amsterdam", "athens", "barcelona", "berlin", "budapest", "lisbon", "london", "paris", "rome", "vienna", ] cities = [ amsterdam, athens, barcelona, berlin, budapest, lisbon, london, paris, rome, vienna, ] # we use concat function of pandas and we vertically stacked data of all cities, to transform them into a single data called "airbnb_data" airbnb_data = pd.concat(cities, ignore_index=True) airbnb_data # ## 1. 
Data cleaning and preprocessing # We show the first 5 rows of our dataset with the "head" function: airbnb_data.head() # We check whether there are NaN or null values in the dataset: airbnb_data.isna().sum()
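The cleaning step above only prints raw null counts. A minimal sketch of how that check could be extended, expressing missing values as percentages per column and counting duplicate rows, is shown below; it assumes the combined airbnb_data frame built in the code above, and the helper name completeness_report is introduced here for illustration only.

import pandas as pd

def completeness_report(df: pd.DataFrame) -> pd.DataFrame:
    # Share of missing values per column, expressed as a percentage and sorted worst-first
    missing_pct = df.isna().mean().mul(100).sort_values(ascending=False)
    report = missing_pct.to_frame(name="missing_%")
    # Keep the dtype next to the percentage to spot columns that may need recoding
    report["dtype"] = df.dtypes
    return report

# Example usage (assuming airbnb_data was built as above):
# print(completeness_report(airbnb_data))
# print("duplicate rows:", airbnb_data.duplicated().sum())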
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/066/129066393.ipynb
airbnb-prices-in-european-cities
thedevastator
[{"Id": 129066393, "ScriptId": 37945030, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8460044, "CreationDate": "05/10/2023 17:47:40", "VersionNumber": 5.0, "Title": "PROJECT_AI_G27", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 311.0, "LinesInsertedFromPrevious": 161.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 150.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184793569, "KernelVersionId": 129066393, "SourceDatasetVersionId": 5030966}]
[{"Id": 5030966, "DatasetId": 2919695, "DatasourceVersionId": 5101124, "CreatorUserId": 10654180, "LicenseName": "CC0: Public Domain", "CreationDate": "02/20/2023 09:48:04", "VersionNumber": 2.0, "Title": "Airbnb Prices in European Cities", "Slug": "airbnb-prices-in-european-cities", "Subtitle": "Determinants of Price by Room Type, Location, Cleanliness Rating, and More", "Description": "_____\n# Airbnb Prices in European Cities\n### Determinants of Price by Room Type, Location, Cleanliness Rating, and More\nBy [[source]](https://zenodo.org/record/4446043#.Y9Y9ENJBwUE)\n_____\n\n### About this dataset\n> This dataset provides a comprehensive look at Airbnb prices in some of the most popular European cities. Each listing is evaluated for various attributes such as room types, cleanliness and satisfaction ratings, bedrooms, distance from the city centre, and more to capture an in-depth understanding of Airbnb prices on both weekdays and weekends. Using spatial econometric methods, we analyse and identify the determinants of Airbnb prices across these cities. Our dataset includes information such as realSum (the total price of the listing), room_type (private/shared/entire home/apt), host_is_superhost (boolean value indicating if host is a superhost or not), multi (indicator whether listing is for multiple rooms or not), biz (business indicator) , guest_satisfaction_overall (overall rating from guests camparing all listings offered by host ), bedrooms, dist (distance from city center) , lng & lat coordinates for location identification etc. We hope that this data set offers insight into how global markets are affected by social dynamics and geographical factors which in turn determine pricing strategies for optimal profitability!\n\n### More Datasets\n> For more datasets, click [here](https://www.kaggle.com/thedevastator/datasets).\n\n### Featured Notebooks\n> - \ud83d\udea8 **Your notebook can be here!** \ud83d\udea8! \n\n### How to use the dataset\n> This dataset can be used by individuals and companies to gain insight on the cost of Airbnb listings in some of the most popular European cities. It contains information on a variety of attributes such as room type, cleanliness rating, guest satisfaction, distance from the city centre, and more. In addition to exploring general trends in prices across Europe, this dataset can be used for deeper spatial econometric analysis. \n> \n> To begin using this dataset for your own research or analysis project: \n> - Download the files which contain both weekday and weekend listings data for European cities. \n> - Familiarize yourself with the columns included in each file; these provide descriptions of various attributes associated with each listing. \n> - Calculate any desired summary statistics - average price per night per city or room type etc. - using statistical software (e.g Excel). \n> - Perform spatial econometric analysis if desired; use specialized packages such as `spdep` or `spatialreg` in R to identify determinants of Airbnb price levels across Europe (e.g., metro distance). \n> - Visualize your results with GIS software if necessary to more easily understand patterns between variables like proximity/location and price level (e.g., QGIS). 
\n> \n> By leveraging both descriptive and inferential methods while taking advantage of geographic information systems (GIS), users can apply this dataset to many interesting questions related to rental prices on Airbnb in Europe!\n\n### Research Ideas\n> - Analyzing spatial trends in Airbnb prices across Europe and finding the most favorable cities for hosting.\n> - Comparing differences between weekday vs weekend booking patterns to project rental rates for vacationers and business travelers in European cities. \n> - Using spatial econometrics methods to find important determinants of Airbnb prices in order to provide insights into areas of opportunity for improvement, or assess the effectiveness of existing policy changes concerning vacation rentals\n\n### Acknowledgements\n> If you use this dataset in your research, please credit the original authors.\n> [Data Source](https://zenodo.org/record/4446043#.Y9Y9ENJBwUE)\n> \n>\n\n\n### License\n> \n> \n> **License: [CC0 1.0 Universal (CC0 1.0) - Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/)**\n> No Copyright - You can copy, modify, distribute and perform the work, even for commercial purposes, all without asking permission. [See Other Information](https://creativecommons.org/publicdomain/zero/1.0/).\n\n### Columns\n\n**File: vienna_weekdays.csv**\n| Column name | Description |\n|:-------------------------------|:---------------------------------------------------------------------------|\n| **realSum** | The total price of the Airbnb listing. (Numeric) |\n| **room_type** | The type of room being offered (e.g. private, shared, etc.). (Categorical) |\n| **room_shared** | Whether the room is shared or not. (Boolean) |\n| **room_private** | Whether the room is private or not. (Boolean) |\n| **person_capacity** | The maximum number of people that can stay in the room. (Numeric) |\n| **host_is_superhost** | Whether the host is a superhost or not. (Boolean) |\n| **multi** | Whether the listing is for multiple rooms or not. (Boolean) |\n| **biz** | Whether the listing is for business purposes or not. (Boolean) |\n| **cleanliness_rating** | The cleanliness rating of the listing. (Numeric) |\n| **guest_satisfaction_overall** | The overall guest satisfaction rating of the listing. (Numeric) |\n| **bedrooms** | The number of bedrooms in the listing. (Numeric) |\n| **dist** | The distance from the city centre. (Numeric) |\n| **metro_dist** | The distance from the nearest metro station. (Numeric) |\n| **lng** | The longitude of the listing. (Numeric) |\n| **lat** | The latitude of the listing. (Numeric) |\n\n_____\n\n**File: vienna_weekends.csv**\n| Column name | Description |\n|:-------------------------------|:---------------------------------------------------------------------------|\n| **realSum** | The total price of the Airbnb listing. (Numeric) |\n| **room_type** | The type of room being offered (e.g. private, shared, etc.). (Categorical) |\n| **room_shared** | Whether the room is shared or not. (Boolean) |\n| **room_private** | Whether the room is private or not. (Boolean) |\n| **person_capacity** | The maximum number of people that can stay in the room. (Numeric) |\n| **host_is_superhost** | Whether the host is a superhost or not. (Boolean) |\n| **multi** | Whether the listing is for multiple rooms or not. (Boolean) |\n| **biz** | Whether the listing is for business purposes or not. (Boolean) |\n| **cleanliness_rating** | The cleanliness rating of the listing. 
(Numeric) |\n| **guest_satisfaction_overall** | The overall guest satisfaction rating of the listing. (Numeric) |\n| **bedrooms** | The number of bedrooms in the listing. (Numeric) |\n| **dist** | The distance from the city centre. (Numeric) |\n| **metro_dist** | The distance from the nearest metro station. (Numeric) |\n| **lng** | The longitude of the listing. (Numeric) |\n| **lat** | The latitude of the listing. (Numeric) |\n\n### Acknowledgements\n> If you use this dataset in your research, please credit the original authors.\n> If you use this dataset in your research, please credit [](https://zenodo.org/record/4446043#.Y9Y9ENJBwUE).", "VersionNotes": "version update", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2919695, "CreatorUserId": 10654180, "OwnerUserId": 10654180.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5030966.0, "CurrentDatasourceVersionId": 5101124.0, "ForumId": 2957092, "Type": 2, "CreationDate": "02/20/2023 09:47:19", "LastActivityDate": "02/20/2023", "TotalViews": 60660, "TotalDownloads": 10618, "TotalVotes": 162, "TotalKernels": 27}]
[{"Id": 10654180, "UserName": "thedevastator", "DisplayName": "The Devastator", "RegisterDate": "05/26/2022", "PerformanceTier": 4}]
# # EARIN Project - G27 (Marta Tolsà & Thomas Jost) # ## Preliminary Documentation # ### A short description of the algorithms that will be used, along with some examples. # **1. Spatial Analysis:** # - **Spatial clustering:** this algorithm can be used to identify spatial clusters of similar Airbnb across Europe. It helps in finding regions or cities with comparable pricing patterns. # - **Spatial interpolation:** this algorithm can be applied to estimate Airbnb prices in areas where data is missing or sparse. It helps in creating a comprehensive price map. # - **Hotspot analysis:** this algorithm can identify hotspots of high or low Airbnb prices. It helps in identifying cities or regions with exceptionally high or low prices compared to the average. # **2. Time Series Analysis:** # - **Seasonal decomposition:** this method can be used to decompose the time series data of Airbnb prices into trend, seasonal, and residual components. It helps in understanding the seasonal patterns and trends in rental rates. # - **ARIMA (AutoRegressive Integrated Moving Average):** this algorithm can be employed to forecast future rental rates based on historical patterns. We can take into account the autoregressive and moving average components of the time series data. # - **Time series regression:** this method can be used to analyze the relationship between Airbnb prices and factors such as weekdays/weekends, holidyas, or other relevant variables. It helps in understanding the differences in booking patterns and their impact on rental rates. # **3. Spatial Econometrics:** # - **Spatial regression models:** these models can be used to examine the relationship between Airbnb prices and various determinants such as location, amenities, neighborhood characteristics, and accessibility to amenities or transportation. Examples of these models include Spatial Lag Model (SLM) and Spatial Error Model (SEM). # - **Spatial Durbin Model:** this model can be used to capture spatial dependencies among the determinants and Airbnb prices, considering both direct and indirect spatial effects. # - **Geographically Weighted Regression (GWR):** this method can be applied to explore spatially varying relationships between Airbnb prices and determinants. It helps in identifying local variations and understanding the heterogeneity of price determinants across different areas. # To sum up: # - We could analize spatial trends in Airbnb prices across Europe and find the most favorable cities for hosting. # - We could compare differences between weekday vs weekend booking patterns to project rental rates for vacationers and business travelers in European cities. # - We can use spatial econometrics methods to find important determinants of Airbnb prices in order to provide insights into areas of opportunity for improvement, or assess the effectiveness of existing policy changes concerning vacation rentals. # ### Selection and description of the datasets. # This dataset provides a comprehensive look at Airbnb prices in some of the most popular European cities. Each listing is evaluated for various attributes such as room types, cleanliness and satisfaction ratings, bedrooms, distance from the city centre, and more to capture an in-depth understanding of Airbnb prices on both weekdays and weekends. # Using spatial econometric methods, we analyse and identify the determinants of Airbnb prices across these cities. 
# Our dataset includes information such as: # - **realSum** (the total price of the Airbnb listing) # - **room_type** (the type of room available in the listing: "private","shared", "entire home/apt", indicating wheter guests have exclusive access to the entire property or if they will be sharing the space with others) # - **room_shared** (binary indicator (0/1) that specifies wheter the room is shared with other guests) # - **room_private** (binary indicator (0/1) that specifies wheter the room is private, meaning it is not shared with other guests) # - **person_capacity** (the maximum number of people that can be accommodated in the listing) # - **host_is_superhost** (boolean value indicating if host is a superhost or not. Superhosts are recognized for providing exceptional experiences to guests) # - **multi** (indicator (0/1) of whether the listing is for multiple rooms or not) # - **biz** (business indicator, providing information about wheter the listing is primarily intended for business purposes) # - **cleanliness_rating** (the satisfaction of previous guests with the cleanliness of the accomodation) # - **guest_satisfaction_overall** (the overall rating provided by guests, comparing all the listings offered by the host. It captures the overall satisfaction level of guests) # - **bedrooms** (number of bedrooms available in the listing) # - **dist** (distance from the city center. It provides information on the proximity of the accomodation to the central area of the city) # - **metro_dist** (distance from the nearest metro station, providing insights into the accessibility of public transportation) # - **attr_index** (index or score related to the attractiveness of the location or surrounding area) # - **attr_index_norm** (normalized version of the attr_index, which standardizes the values for easier comparison) # - **rest_index** (index or score related to the availability of restaurants or dining options in the vicinity of the listing) # - **rest_index_norm** (normalized version of the rest_index) # - **lng** (the longitude coordinates for location identification) # - **lat** (the latitude coordinates for location identification) # This data set could offers insight into how global markets are affected by social dynamics and geographical factors which in turn determine pricing strategies for optimal profitability. # ### General plan of tests/experiments. # The general plan of tests/experiments could be as follows: # **1. Data cleaning and preprocessing:** Before conducting any analysis, we ensure that the downloaded dataset is clean and free from any inconsistencies or missing values. We perform data cleaning and preprocessing steps, such as removing duplicates, handling missing data, and standardizing variables, to ensure the quality of the data. # In our case, first of all we download the files which contain both weekday and weekend listings data for European cities. # And we also familiarize with the columns included in each file; these provide descriptions of various attributes associated with each listing. # **2. Hypothesis formulation:** we clearly define the research questions and hypotheses that you want to investigate. This will help guide our analysis and ensure that we are addressing specific objectives, such as identifying the most important determinants of Airbnb prices or understanding spatial trends. # **3. Feature engineering:** we explore additional features or variables that can enhance our analysis. 
For example, we could calculate additional metrics like average ratings per neighborhood, distance to popular landmarks, or availability of amenities. These additional features can provide valuable insights into the factors influencing Airbnb prices. # In our case, we could calculate any desired summary statistics - average price per night per city or room type etc. - using statistical software (e.g Excel). # **4. Comparative analysis:** we consider conducting a comparative analysis between different cities or regions within Europe. This can help identify variations in pricing patterns, factors driving prices, and opportunities for targeting specific markets. # **5. Robust statistical methods:** we utilize advanced statistical methods that are suitable for analyzing spatial and temporal data. For example, we consider using geospatial regression models, time series models with spatial dependencies, or machine learning algorithms for predictive analysis. # We can perform spatial econometric analysis if desired; use specialized packages such as spdep or spatialreg in R to identify determinants of Airbnb price levels across Europe (e.g., metro distance). # **6. Sensitivity analysis:** we perform sensitivity analysis to assess the stability and reliability of our findings. We test the robustness of our results by varying assumptions or including different variables to ensure the consistency and validity of our conclusions. # We could visualize the results with GIS software if necessary to more easily understand patterns between variables like proximity/location and price level (e.g., QGIS). # **7. Incorporate feedback and iterations:** we seek feedback from domain experts or peers to validate our approach and interpretation of the results. We could incorporate feedback and iterate on our analysis to refine our methodology and ensure the accuracy and reliability of our findings. # This plan of tests and experiments will help understand the spatial patterns and determinants of Airbnb prices in European cities, enabling the identification of key factors that influence prices and informed decision-making to optimize profitability. # ### Methods of result visualization. # Several options to visualize the results of our dataset are: # - **Scatter plots:** we can use scatter plots to show the relationship between two variables, such as price and distance from the city center. Each point in the plot represents an Airbnb listing, and its position on the graph indicates the values of both variables. This will allow us to identify visual patterns or trends. # - **Heatmaps:** Heatmaps are useful for visualizing the spatial distribution of Airbnb prices in European cities. We can use colors to represent different price ranges and overlay them on a geographical map. This will help us identify areas with higher or lower prices. # - **Bar charts or box plots:** These charts are suitable for comparing average Airbnb prices between different cities or room types. We can display the average prices in bars or boxes, allowing us to identify price differences among different categories. # - **Line charts:** We can use line charts to visualize temporal trends in Airbnb prices. We can plot the average price over time for different cities or compare prices of different room types over time. # - **Thematic maps:** we can use colors or shades to represent different price ranges on the map. This will facilitate the identification of cities with higher or lower prices. 
# - **Network graphs:** If we want to visualize the relationship between different variables, such as guest satisfaction and the number of rooms, we can use network graphs. These graphs represent variables as interconnected nodes, allowing us to see connections and relationships between them. # We will choose visualization methods that best suit our data and the objectives of our analysis. And we will use clear colors, labels, and legends to facilitate the interpretation of graphs and maps. # ### Definition of quality measures that will be used. # We can consider following quality measures to assess the accuracy, reliability, and overall quality of the data: # **Data Completeness:** This measure evaluates the extent to which the dataset is complete, meaning it has minimal missing values or gaps. We will calculate the percentage of missing values for each variable to identify areas of data incompleteness. # **Data Consistency:** This measure assesses the consistency of the data across different variables. We will check for any inconsistencies or contradictions in the data, such as contradictory values or illogical relationships between variables. # **Data Accuracy:** This measure determines the accuracy of the data by comparing it with reliable external sources or ground truth information. We can cross-reference Airbnb prices with other sources or validate the data through expert opinions or surveys. # **Outlier Detection:** We can identify outliers, which are extreme values that deviate significantly from the typical pattern of the data. Detecting outliers helps identify potential errors or anomalies in the dataset. # **Data Validity:** We can ensure that the data aligns with the intended research scope and objectives. We will validate that the dataset includes relevant variables and attributes that accurately capture the factors influencing Airbnb prices. # **Data Reliability:** We can assess the reliability of the data collection process and the sources from which the data was obtained. We will consider factors such as data collection methodology, sampling techniques, and the reputation of the data sources. # **Data Relevance:** We could evaluate the relevance of the data to our research questions and objectives. And also, we can determine whether the dataset adequately represents the diversity of European cities and captures the necessary variables for our analysis. # **Data Quality Documentation:** We will maintain comprehensive documentation that describes the data collection process, data cleaning procedures, and any assumptions or limitations associated with the dataset. This documentation ensures transparency and reproducibility of the analysis. # By applying these quality measures to our dataset, we can ensure that our analysis is based on reliable and accurate data, leading to more robust insights and informed decision-making. # **IMPORT NECESSARY LIBRARIES** import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import math import xgboost as xgb from xgboost import XGBRegressor from sklearn.preprocessing import StandardScaler, PolynomialFeatures from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.feature_selection import SequentialFeatureSelector, SelectFromModel from sklearn.linear_model import LinearRegression, Ridge, Lasso from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error, r2_score import warnings # **IMPORT DATASETS** amsterdam_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/amsterdam_weekdays.csv" ) amsterdam_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/amsterdam_weekends.csv" ) athens_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/athens_weekdays.csv" ) athens_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/athens_weekends.csv" ) barcelona_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/barcelona_weekdays.csv" ) barcelona_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/barcelona_weekends.csv" ) berlin_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/berlin_weekdays.csv" ) berlin_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/berlin_weekends.csv" ) budapest_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/budapest_weekdays.csv" ) budapest_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/budapest_weekends.csv" ) lisbon_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/lisbon_weekdays.csv" ) lisbon_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/lisbon_weekends.csv" ) london_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/london_weekdays.csv" ) london_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/london_weekends.csv" ) paris_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/paris_weekdays.csv" ) paris_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/paris_weekends.csv" ) rome_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/rome_weekdays.csv" ) rome_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/rome_weekends.csv" ) vienna_weekdays = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/vienna_weekdays.csv" ) vienna_weekends = pd.read_csv( "/kaggle/input/airbnb-prices-in-european-cities/vienna_weekends.csv" ) # **DIMENSIONS OF DATASETS** print("amsterdam_weekdays = ", amsterdam_weekdays.shape) print("amsterdam_weekends = ", amsterdam_weekends.shape) print("athens_weekdays = ", athens_weekdays.shape) print("athens_weekends = ", athens_weekends.shape) print("barcelona_weekdays = ", barcelona_weekdays.shape) print("barcelona_weekends = ", barcelona_weekends.shape) print("berlin_weekdays = ", berlin_weekdays.shape) print("berlin_weekends = ", berlin_weekends.shape) print("budapest_weekdays = ", budapest_weekdays.shape) print("budapest_weekends = ", budapest_weekends.shape) print("lisbon_weekdays = ", lisbon_weekdays.shape) print("lisbon_weekends 
= ", lisbon_weekends.shape) print("london_weekdays = ", london_weekdays.shape) print("london_weekends = ", london_weekends.shape) print("paris_weekdays = ", paris_weekdays.shape) print("paris_weekends = ", paris_weekends.shape) print("rome_weekdays = ", rome_weekdays.shape) print("rome_weekends = ", rome_weekends.shape) print("vienna_weekdays = ", vienna_weekdays.shape) print("vienna_weekends = ", vienna_weekends.shape) # **FEATURES OF THE DATASET** print(amsterdam_weekdays.columns) print(amsterdam_weekends.columns) print(athens_weekdays.columns) print(athens_weekends.columns) print(barcelona_weekdays.columns) print(barcelona_weekends.columns) print(berlin_weekdays.columns) print(berlin_weekends.columns) print(budapest_weekdays.columns) print(budapest_weekends.columns) print(lisbon_weekdays.columns) print(lisbon_weekends.columns) print(london_weekdays.columns) print(london_weekends.columns) print(paris_weekdays.columns) print(paris_weekends.columns) print(rome_weekdays.columns) print(rome_weekends.columns) print(vienna_weekdays.columns) print(vienna_weekends.columns) # We can see that the number of features in all files are the same; however, the number of records is different. # If we observe the list of feature names, we can see that all datasets have the same number, as well as the same features. So, they can get together in order to convert all these different datasets into a single dataset. # **COMBINE ALL DATASETS INTO A SINGLE ONE** # def combine(csv_1, col_1, csv_2, col_2, city): # We combine the "weekdays" and "weekends" datasets of the individual datasets into 1 datset for a particular city. csv_1["week time"] = col_1 csv_2["week time"] = col_2 # We remove the "Unnamed: 0" feature since it's the index number of the records and isn't useful csv_1.drop(columns=["Unnamed: 0"], inplace=True) csv_2.drop(columns=["Unnamed: 0"], inplace=True) merged = pd.concat([csv_1, csv_2]) # We add the name of the city and we put it into a new column "city", since we will be combining all these cities datasets and we would need the differentiate the data in some way for analysis and insights merged["city"] = city return merged amsterdam = combine( amsterdam_weekdays, "weekdays", amsterdam_weekends, "weekends", "amsterdam" ) athens = combine(athens_weekdays, "weekdays", athens_weekends, "weekends", "athens") barcelona = combine( barcelona_weekdays, "weekdays", barcelona_weekends, "weekends", "barcelona" ) berlin = combine(berlin_weekdays, "weekdays", berlin_weekends, "weekends", "berlin") budapest = combine( budapest_weekdays, "weekdays", budapest_weekends, "weekends", "budapest" ) lisbon = combine(lisbon_weekdays, "weekdays", lisbon_weekends, "weekends", "lisbon") london = combine(london_weekdays, "weekdays", london_weekends, "weekends", "london") paris = combine(paris_weekdays, "weekdays", paris_weekends, "weekends", "paris") rome = combine(rome_weekdays, "weekdays", rome_weekends, "weekends", "rome") vienna = combine(vienna_weekdays, "weekdays", vienna_weekends, "weekends", "vienna") cities_names = [ "amsterdam", "athens", "barcelona", "berlin", "budapest", "lisbon", "london", "paris", "rome", "vienna", ] cities = [ amsterdam, athens, barcelona, berlin, budapest, lisbon, london, paris, rome, vienna, ] # we use concat function of pandas and we vertically stacked data of all cities, to transform them into a single data called "airbnb_data" airbnb_data = pd.concat(cities, ignore_index=True) airbnb_data # ## 1. 
Data cleaning and preprocessing # We show the first 5 rows of our dataset with the "head" function: airbnb_data.head() # We check whether there are NaN or null values in the dataset: airbnb_data.isna().sum()
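The plan above lists scatter plots, box plots, and heatmaps among the intended visualizations. A minimal sketch of two of them is given below, assuming the combined airbnb_data frame and the realSum, dist, and city / week time columns described and created above; the function name plot_price_overview is illustrative, not from the original notebook.

import matplotlib.pyplot as plt
import seaborn as sns

def plot_price_overview(df):
    fig, axes = plt.subplots(1, 2, figsize=(14, 5))
    # Box plot: distribution of the listing price (realSum) per city
    sns.boxplot(data=df, x="city", y="realSum", showfliers=False, ax=axes[0])
    axes[0].set_title("Listing price by city")
    axes[0].tick_params(axis="x", rotation=45)
    # Scatter plot: price versus distance from the city centre, split by week time
    sns.scatterplot(data=df, x="dist", y="realSum", hue="week time", alpha=0.3, ax=axes[1])
    axes[1].set_title("Price vs distance from city centre")
    fig.tight_layout()
    return fig

# plot_price_overview(airbnb_data)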
false
20
5,344
0
6,172
5,344
129066927
<jupyter_start><jupyter_text>House Prediction Dataset ### Context By analyzing these Bangalore house data we will determine the approximate price for the houses. ### Content All the necessary information about that house id given like size, area , price, number of balcony/ bathrooms. Kaggle dataset identifier: house-prediction-dataset <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np df = pd.read_csv("/kaggle/input/house-prediction-dataset/Bengaluru_House_Data.csv") df.head() df["society"].isnull() df["society"].isnull().sum() df.head(-1) df["size"].isnull().sum() df["area_type"].isnull().sum() df["balcony"].isnull().sum() df["bath"].isnull().sum() df.info() df = df.drop(["society", "balcony"], axis=1, inplace=False) df.head() df.describe().T unique_bath = df["bath"].unique() unique_bath_list = unique_bath.tolist() unique_bath_list len(unique_bath_list) import matplotlib.pyplot as plt import seaborn as sns bath = df["bath"] bath_list = bath.tolist() bath_freq_dict = {} bath = df["bath"] bath_list = bath.tolist() bath_freq_dict = {} for i in bath_list: if i not in bath_freq_dict: bath_freq_dict[i] = 1 else: bath_freq_dict[i] += 1 bath_freq_dict bath_name, bath_count = [], [] for bath, count in bath_freq_dict.items(): bath_name.append(bath) bath_count.append(count) bath_name = list(bath_freq_dict.keys()) bath_count = list(bath_freq_dict.values()) plt.bar(bath_name, bath_count) plt.xticks(rotation=90)
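The loop above rebuilds a frequency table for the bath column by hand before plotting it. A sketch of the more idiomatic pandas equivalent using value_counts is shown below, assuming the same df loaded from Bengaluru_House_Data.csv; the helper name plot_bath_counts is introduced only for illustration.

import pandas as pd
import matplotlib.pyplot as plt

def plot_bath_counts(df: pd.DataFrame) -> pd.Series:
    # value_counts() gives the same frequencies as the manual dictionary loop,
    # already sorted by count, with missing values kept visible via dropna=False
    bath_counts = df["bath"].value_counts(dropna=False)
    ax = bath_counts.plot(kind="bar")
    ax.set_xlabel("number of bathrooms")
    ax.set_ylabel("number of listings")
    plt.xticks(rotation=90)
    return bath_counts

# plot_bath_counts(df)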
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/066/129066927.ipynb
house-prediction-dataset
shantanudhakadd
[{"Id": 129066927, "ScriptId": 38366467, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15033707, "CreationDate": "05/10/2023 17:53:13", "VersionNumber": 1.0, "Title": "notebookd0a2746019", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 83.0, "LinesInsertedFromPrevious": 83.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184794525, "KernelVersionId": 129066927, "SourceDatasetVersionId": 3243436}]
[{"Id": 3243436, "DatasetId": 1965889, "DatasourceVersionId": 3293637, "CreatorUserId": 9547290, "LicenseName": "CC0: Public Domain", "CreationDate": "03/01/2022 16:54:58", "VersionNumber": 1.0, "Title": "House Prediction Dataset", "Slug": "house-prediction-dataset", "Subtitle": "House Price Dataset for Price Prediction", "Description": "### Context\n\nBy analyzing these Bangalore house data we will determine the approximate price for the houses.\n\n\n### Content\n\nAll the necessary information about that house id given like size, area , price, number of balcony/ bathrooms. \n\n\n### Acknowledgements\n\nWhat are the things that a potential home buyer considers before purchasing a house? The location, the size of the property, vicinity to offices, schools, parks, restaurants, hospitals or the stereotypical white picket fence? What about the most important factor \u2014 the price?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1965889, "CreatorUserId": 9547290, "OwnerUserId": 9547290.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3243436.0, "CurrentDatasourceVersionId": 3293637.0, "ForumId": 1989980, "Type": 2, "CreationDate": "03/01/2022 16:54:58", "LastActivityDate": "03/01/2022", "TotalViews": 8516, "TotalDownloads": 1213, "TotalVotes": 27, "TotalKernels": 3}]
[{"Id": 9547290, "UserName": "shantanudhakadd", "DisplayName": "Shantanu Dhakad", "RegisterDate": "02/01/2022", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np df = pd.read_csv("/kaggle/input/house-prediction-dataset/Bengaluru_House_Data.csv") df.head() df["society"].isnull() df["society"].isnull().sum() df.head(-1) df["size"].isnull().sum() df["area_type"].isnull().sum() df["balcony"].isnull().sum() df["bath"].isnull().sum() df.info() df = df.drop(["society", "balcony"], axis=1, inplace=False) df.head() df.describe().T unique_bath = df["bath"].unique() unique_bath_list = unique_bath.tolist() unique_bath_list len(unique_bath_list) import matplotlib.pyplot as plt import seaborn as sns bath = df["bath"] bath_list = bath.tolist() bath_freq_dict = {} bath = df["bath"] bath_list = bath.tolist() bath_freq_dict = {} for i in bath_list: if i not in bath_freq_dict: bath_freq_dict[i] = 1 else: bath_freq_dict[i] += 1 bath_freq_dict bath_name, bath_count = [], [] for bath, count in bath_freq_dict.items(): bath_name.append(bath) bath_count.append(count) bath_name = list(bath_freq_dict.keys()) bath_count = list(bath_freq_dict.values()) plt.bar(bath_name, bath_count) plt.xticks(rotation=90)
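The script above counts nulls column by column and drops society and balcony but never fills the remaining gaps. One way those missing values could be handled is sketched below; the median/mode fill strategies and the helper name fill_missing are illustrative assumptions, not part of the original notebook.

import pandas as pd

def fill_missing(df: pd.DataFrame) -> pd.DataFrame:
    out = df.copy()
    # Numeric column: fill missing bathroom counts with the median (assumed strategy)
    out["bath"] = out["bath"].fillna(out["bath"].median())
    # Categorical columns counted above: fill with the most frequent value (assumed strategy)
    for col in ["size", "area_type"]:
        if col in out.columns and out[col].isna().any():
            out[col] = out[col].fillna(out[col].mode().iloc[0])
    return out

# df = fill_missing(df)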
false
1
615
0
695
615
129066678
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # **Load Dataset** train = pd.read_csv("/kaggle/input/titanic/train.csv") train.head() test = pd.read_csv("/kaggle/input/titanic/test.csv") test_passengerIDs = test["PassengerId"] test.head() # # **Understanding the data** train.shape test.shape train.info() # age has nulls, cabin has too many nulls, embarked has just 2 nulls test.info() # age has nulls, cabin has too many nulls # Check duplicates train.duplicated().sum() test.duplicated().sum() women = train.loc[train.Sex == "female"]["Survived"] women_sur_rate = sum(women) / len(women) print("% of women who survived:", women_sur_rate) men = train.loc[train.Sex == "male"]["Survived"] men_sur_rate = sum(men) / len(men) print("% of men who survived:", men_sur_rate) # visualizing missing data import missingno as msno msno.matrix(train) msno.matrix(test) # removing columns that seems unimportant train.drop(["PassengerId", "Ticket", "Cabin", "Name"], axis=1, inplace=True) test.drop(["PassengerId", "Ticket", "Cabin", "Name"], axis=1, inplace=True) # replace missing age with mean train.Age.fillna(train.Age.mean(), inplace=True) test.Age.fillna(test.Age.mean(), inplace=True) # replace missing fare in test data with mean test.Fare.fillna(test.Fare.mean(), inplace=True) # removing two rows of data where embarked is null train.dropna(inplace=True) msno.matrix(train) msno.matrix(test) # # **EDA** import seaborn as sns from matplotlib import pyplot as plt import plotly.express as px for cols in ["Sex", "Embarked", "Pclass", "SibSp", "Parch"]: print(cols) sns.countplot(data=train, x=cols, hue="Survived") plt.show() print("") px.histogram(train, x="Fare", nbins=10) train["Per_person_fare"] = train.Fare / (train.SibSp + train.Parch + 1) test["Per_person_fare"] = test.Fare / (test.SibSp + test.Parch + 1) train.head() px.histogram(train, x="Per_person_fare", nbins=10) plt.figure(figsize=(12, 8)) sns.heatmap(train.corr(), annot=True) # Correlation of train dataset each other values # # **Encode Data** train.replace({"female": 0, "male": 1}, inplace=True) train = pd.get_dummies(train, columns=["Embarked"], prefix="Embarked") test.replace({"female": 0, "male": 1}, inplace=True) test = pd.get_dummies(test, columns=["Embarked"], prefix="Embarked") msno.matrix(train) msno.matrix(test) # # **Model Predictions** y = train["Survived"] x = train.drop(["Survived", "Embarked_Q"], axis=1) test = test.drop("Embarked_Q", axis=1) from xgboost import XGBClassifier from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import ( RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, ) from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler min_max_scaler = 
preprocessing.MinMaxScaler() from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.2, random_state=20 ) models = { "Logistic Regression": LogisticRegression(), "Decision Tree": DecisionTreeClassifier(), "Random Forest": RandomForestClassifier(), "SVM": SVC(), "XGBClassifier": XGBClassifier(), "GradientBoostingClassifier": GradientBoostingClassifier(), "AdaBoostClassifier": AdaBoostClassifier(), } for name, model in models.items(): print(f"{name} : ") # Fit the model model.fit(x_train, y_train) y_pred = model.predict(x_test) # Evaluate the model acc = accuracy_score(y_test, y_pred) prec = precision_score(y_test, y_pred) rec = recall_score(y_test, y_pred) f1 = f1_score(y_test, y_pred) print(f"Accuracy: {acc:.3f}") print(f"Precision: {prec:.3f}") print(f"Recall: {rec:.3f}") print(f"F1-score: {f1:.3f}") print() Gboost = GradientBoostingClassifier() Gboost.fit(x_train, y_train) y_pred = Gboost.predict(x_test) acc = accuracy_score(y_test, y_pred) prec = precision_score(y_test, y_pred) rec = recall_score(y_test, y_pred) f1 = f1_score(y_test, y_pred) print("Accuracy:", acc) print("Precison:", prec) print("Recall:", rec) print("F1-score:", f1) test.head() test_pred = Gboost.predict(test) test_pred # Join test set with predictions for viewing test["Sur_prediction"] = test_pred.tolist() test.head(20) # # **Subission** df = pd.DataFrame( { "PassengerId": test_passengerIDs.values, "Survived": test_pred, } ) df.to_csv("submission.csv", index=False) df.to_csv("submission.csv", index=False)
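The model loop above fits every classifier on the raw features even though make_pipeline and StandardScaler are imported and the scaler variable is never used. A minimal sketch of how a scaler could actually be wired in front of a model is shown below; the choice of LogisticRegression and the helper name scaled_logreg are illustrative, not from the original notebook.

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

def scaled_logreg(x_train, y_train, x_test, y_test) -> float:
    # The pipeline scales the features and fits the classifier in one step,
    # so the same scaling learned on the training split is reused at predict time
    model = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
    model.fit(x_train, y_train)
    y_pred = model.predict(x_test)
    return accuracy_score(y_test, y_pred)

# acc = scaled_logreg(x_train, y_train, x_test, y_test)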
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/066/129066678.ipynb
null
null
[{"Id": 129066678, "ScriptId": 38344913, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1290211, "CreationDate": "05/10/2023 17:50:36", "VersionNumber": 5.0, "Title": "Titanic survival prediction", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 189.0, "LinesInsertedFromPrevious": 80.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 109.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # **Load Dataset** train = pd.read_csv("/kaggle/input/titanic/train.csv") train.head() test = pd.read_csv("/kaggle/input/titanic/test.csv") test_passengerIDs = test["PassengerId"] test.head() # # **Understanding the data** train.shape test.shape train.info() # age has nulls, cabin has too many nulls, embarked has just 2 nulls test.info() # age has nulls, cabin has too many nulls # Check duplicates train.duplicated().sum() test.duplicated().sum() women = train.loc[train.Sex == "female"]["Survived"] women_sur_rate = sum(women) / len(women) print("% of women who survived:", women_sur_rate) men = train.loc[train.Sex == "male"]["Survived"] men_sur_rate = sum(men) / len(men) print("% of men who survived:", men_sur_rate) # visualizing missing data import missingno as msno msno.matrix(train) msno.matrix(test) # removing columns that seems unimportant train.drop(["PassengerId", "Ticket", "Cabin", "Name"], axis=1, inplace=True) test.drop(["PassengerId", "Ticket", "Cabin", "Name"], axis=1, inplace=True) # replace missing age with mean train.Age.fillna(train.Age.mean(), inplace=True) test.Age.fillna(test.Age.mean(), inplace=True) # replace missing fare in test data with mean test.Fare.fillna(test.Fare.mean(), inplace=True) # removing two rows of data where embarked is null train.dropna(inplace=True) msno.matrix(train) msno.matrix(test) # # **EDA** import seaborn as sns from matplotlib import pyplot as plt import plotly.express as px for cols in ["Sex", "Embarked", "Pclass", "SibSp", "Parch"]: print(cols) sns.countplot(data=train, x=cols, hue="Survived") plt.show() print("") px.histogram(train, x="Fare", nbins=10) train["Per_person_fare"] = train.Fare / (train.SibSp + train.Parch + 1) test["Per_person_fare"] = test.Fare / (test.SibSp + test.Parch + 1) train.head() px.histogram(train, x="Per_person_fare", nbins=10) plt.figure(figsize=(12, 8)) sns.heatmap(train.corr(), annot=True) # Correlation of train dataset each other values # # **Encode Data** train.replace({"female": 0, "male": 1}, inplace=True) train = pd.get_dummies(train, columns=["Embarked"], prefix="Embarked") test.replace({"female": 0, "male": 1}, inplace=True) test = pd.get_dummies(test, columns=["Embarked"], prefix="Embarked") msno.matrix(train) msno.matrix(test) # # **Model Predictions** y = train["Survived"] x = train.drop(["Survived", "Embarked_Q"], axis=1) test = test.drop("Embarked_Q", axis=1) from xgboost import XGBClassifier from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import ( RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, ) from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler min_max_scaler = 
preprocessing.MinMaxScaler() from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.2, random_state=20 ) models = { "Logistic Regression": LogisticRegression(), "Decision Tree": DecisionTreeClassifier(), "Random Forest": RandomForestClassifier(), "SVM": SVC(), "XGBClassifier": XGBClassifier(), "GradientBoostingClassifier": GradientBoostingClassifier(), "AdaBoostClassifier": AdaBoostClassifier(), } for name, model in models.items(): print(f"{name} : ") # Fit the model model.fit(x_train, y_train) y_pred = model.predict(x_test) # Evaluate the model acc = accuracy_score(y_test, y_pred) prec = precision_score(y_test, y_pred) rec = recall_score(y_test, y_pred) f1 = f1_score(y_test, y_pred) print(f"Accuracy: {acc:.3f}") print(f"Precision: {prec:.3f}") print(f"Recall: {rec:.3f}") print(f"F1-score: {f1:.3f}") print() Gboost = GradientBoostingClassifier() Gboost.fit(x_train, y_train) y_pred = Gboost.predict(x_test) acc = accuracy_score(y_test, y_pred) prec = precision_score(y_test, y_pred) rec = recall_score(y_test, y_pred) f1 = f1_score(y_test, y_pred) print("Accuracy:", acc) print("Precison:", prec) print("Recall:", rec) print("F1-score:", f1) test.head() test_pred = Gboost.predict(test) test_pred # Join test set with predictions for viewing test["Sur_prediction"] = test_pred.tolist() test.head(20) # # **Subission** df = pd.DataFrame( { "PassengerId": test_passengerIDs.values, "Survived": test_pred, } ) df.to_csv("submission.csv", index=False) df.to_csv("submission.csv", index=False)
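The evaluation above ranks the models on a single 80/20 split. A short sketch of how the same comparison could be made less split-dependent with cross-validation is given below; the 5-fold setting and the helper name compare_with_cv are arbitrary choices, not from the original notebook.

from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier

def compare_with_cv(models: dict, x, y, folds: int = 5) -> dict:
    # Mean and spread of accuracy over k folds per model
    results = {}
    for name, model in models.items():
        scores = cross_val_score(model, x, y, cv=folds, scoring="accuracy")
        results[name] = (scores.mean(), scores.std())
    return results

# Example with a subset of the models defined above:
# compare_with_cv({"Logistic Regression": LogisticRegression(max_iter=1000),
#                  "GradientBoostingClassifier": GradientBoostingClassifier()}, x, y)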
false
0
1,740
0
1,740
1,740
129185000
<jupyter_start><jupyter_text>1700+ K-Pop Idols Dataset The world of K-Pop is one of the most popular and dynamic music industries in the world. This comprehensive dataset provides information on over 1,700 K-Pop idols, including their stage name, full name, Korean name, birthdate, height, weight, birthplace, and former and other group affiliations. This dataset is perfect for anyone interested in exploring the K-Pop industry or conducting research on K-Pop idols. Kaggle dataset identifier: all-kpop-idols <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/all-kpop-idols/kpopidolsv3.csv") df.head() df.info() import matplotlib.pyplot as plt import seaborn as sns df.Group.unique() group_count = df["Group"].value_counts() group_count = group_count[:10,] group_count sns.barplot(y=group_count.index, x=group_count.values) company_count = df["Company"].value_counts() company_count = company_count[:10,] sns.barplot(x=company_count.values, y=company_count.index) df["Group"].count() sns.countplot(x="Gender", data=df) df_idol = df[["Group", "Company"]] df_idol.head() df_idol_count = df_idol.groupby(["Company"])["Group"].count().reset_index(name="count") df_idol_count.head() df_idol_count = df_idol_count.sort_values(by="count", ascending=False).head(5) df_idol_count # Top 5 companies have the most boy bands/girl groups in Kpop industry. sns.barplot(x=df_idol_count["Company"], y=df_idol_count["count"], data=df_idol_count) df.columns df["Date of Birth"] = pd.to_datetime(df["Date of Birth"]) df["year"], df["month"], df["day"] = ( df["Date of Birth"].apply(lambda x: x.year), df["Date of Birth"].apply(lambda x: x.month), df["Date of Birth"].apply(lambda x: x.day), ) df.head(5) df_2country = df.loc[df["Second Country"].notna()] df_2country sns.countplot(y="Second Country", data=df_2country) twocountry_count = df_2country["Second Country"].value_counts() twocountry_count = twocountry_count[:5,] twocountry_count # top 5 second country sns.barplot(x=twocountry_count.index, y=twocountry_count.values) company_second = df_2country["Company"].value_counts() company_second = company_second[:5,] company_second sns.barplot(x=company_second.index, y=company_second.values) df_se_kr = df_2country[df_2country["Second Country"] == "South Korea"] df_se_kr kr_second_count = df_se_kr["Company"].value_counts() kr_second_count = kr_second_count[:5,] kr_second_count sns.barplot(x=kr_second_count.index, y=kr_second_count.values) # **SM COMPANY** df_sm = df[df["Company"] == "SM"] df_sm.head() df_sm.info() df_sm.Country.unique() # Where are they from? 
s = df_sm["Country"].value_counts() ax = sns.barplot(x=s.index, y=s.values, order=s.index) ax.bar_label(ax.containers[0]) # Gender sns.countplot(x="Gender", data=df_sm) df_sm.Group.unique() sns.countplot(y="year", data=df_sm) sns.boxplot(x="Height", data=df_sm) sns.boxplot(x="Weight", data=df_sm) df_male = df[df["Gender"] == "M"] df_male.head() df_male.describe() print(sns.boxplot(x="Height", data=df_male)) print(sns.boxplot(x="Weight", data=df_male)) df_female = df[df["Gender"] == "F"] df_female df_female.describe() sns.boxplot(x="Height", data=df_female) sns.boxplot(x="Weight", data=df_female) df_sm["Debut"] = pd.to_datetime(df_sm["Debut"]) df_sm["year_debut"] = df_sm["Debut"].apply(lambda x: x.year) df_sm.head(5) df_debut = df_sm[["Group", "year_debut"]] df_debut.head(5) df_debut.info() df_debut["Group"].unique() df_debut = df_debut.drop_duplicates(subset="Group") # Timeline sm idols debut df_debut.sort_values(by="year_debut", ascending=True) # **Thai members** df_thai = df[df["Country"] == "Thailand"] df_thai sns.countplot(x="Company", data=df_thai) sns.countplot(x="Gender", data=df_thai) sns.countplot(y="year", data=df_thai)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/185/129185000.ipynb
all-kpop-idols
nicolsalayoarias
[{"Id": 129185000, "ScriptId": 38379589, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5611293, "CreationDate": "05/11/2023 16:09:26", "VersionNumber": 3.0, "Title": "data visualisation of Kpop idols", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 167.0, "LinesInsertedFromPrevious": 55.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 112.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 185007568, "KernelVersionId": 129185000, "SourceDatasetVersionId": 5612836}]
[{"Id": 5612836, "DatasetId": 3101937, "DatasourceVersionId": 5687994, "CreatorUserId": 9622214, "LicenseName": "CC0: Public Domain", "CreationDate": "05/05/2023 21:19:07", "VersionNumber": 4.0, "Title": "1700+ K-Pop Idols Dataset", "Slug": "all-kpop-idols", "Subtitle": "Exploring the world of K-Pop Idols: A comprehensive dataset", "Description": "The world of K-Pop is one of the most popular and dynamic music industries in the world. This comprehensive dataset provides information on over 1,700 K-Pop idols, including their stage name, full name, Korean name, birthdate, height, weight, birthplace, and former and other group affiliations. This dataset is perfect for anyone interested in exploring the K-Pop industry or conducting research on K-Pop idols.", "VersionNotes": "Version 3", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3101937, "CreatorUserId": 9622214, "OwnerUserId": 9622214.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5612836.0, "CurrentDatasourceVersionId": 5687994.0, "ForumId": 3165186, "Type": 2, "CreationDate": "04/07/2023 21:04:02", "LastActivityDate": "04/07/2023", "TotalViews": 6474, "TotalDownloads": 651, "TotalVotes": 35, "TotalKernels": 3}]
[{"Id": 9622214, "UserName": "nicolsalayoarias", "DisplayName": "Nicol\u00e1s Alayo", "RegisterDate": "02/09/2022", "PerformanceTier": 0}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/all-kpop-idols/kpopidolsv3.csv") df.head() df.info() import matplotlib.pyplot as plt import seaborn as sns df.Group.unique() group_count = df["Group"].value_counts() group_count = group_count[:10,] group_count sns.barplot(y=group_count.index, x=group_count.values) company_count = df["Company"].value_counts() company_count = company_count[:10,] sns.barplot(x=company_count.values, y=company_count.index) df["Group"].count() sns.countplot(x="Gender", data=df) df_idol = df[["Group", "Company"]] df_idol.head() df_idol_count = df_idol.groupby(["Company"])["Group"].count().reset_index(name="count") df_idol_count.head() df_idol_count = df_idol_count.sort_values(by="count", ascending=False).head(5) df_idol_count # Top 5 companies have the most boy bands/girl groups in Kpop industry. sns.barplot(x=df_idol_count["Company"], y=df_idol_count["count"], data=df_idol_count) df.columns df["Date of Birth"] = pd.to_datetime(df["Date of Birth"]) df["year"], df["month"], df["day"] = ( df["Date of Birth"].apply(lambda x: x.year), df["Date of Birth"].apply(lambda x: x.month), df["Date of Birth"].apply(lambda x: x.day), ) df.head(5) df_2country = df.loc[df["Second Country"].notna()] df_2country sns.countplot(y="Second Country", data=df_2country) twocountry_count = df_2country["Second Country"].value_counts() twocountry_count = twocountry_count[:5,] twocountry_count # top 5 second country sns.barplot(x=twocountry_count.index, y=twocountry_count.values) company_second = df_2country["Company"].value_counts() company_second = company_second[:5,] company_second sns.barplot(x=company_second.index, y=company_second.values) df_se_kr = df_2country[df_2country["Second Country"] == "South Korea"] df_se_kr kr_second_count = df_se_kr["Company"].value_counts() kr_second_count = kr_second_count[:5,] kr_second_count sns.barplot(x=kr_second_count.index, y=kr_second_count.values) # **SM COMPANY** df_sm = df[df["Company"] == "SM"] df_sm.head() df_sm.info() df_sm.Country.unique() # Where are they from? 
s = df_sm["Country"].value_counts() ax = sns.barplot(x=s.index, y=s.values, order=s.index) ax.bar_label(ax.containers[0]) # Gender sns.countplot(x="Gender", data=df_sm) df_sm.Group.unique() sns.countplot(y="year", data=df_sm) sns.boxplot(x="Height", data=df_sm) sns.boxplot(x="Weight", data=df_sm) df_male = df[df["Gender"] == "M"] df_male.head() df_male.describe() print(sns.boxplot(x="Height", data=df_male)) print(sns.boxplot(x="Weight", data=df_male)) df_female = df[df["Gender"] == "F"] df_female df_female.describe() sns.boxplot(x="Height", data=df_female) sns.boxplot(x="Weight", data=df_female) df_sm["Debut"] = pd.to_datetime(df_sm["Debut"]) df_sm["year_debut"] = df_sm["Debut"].apply(lambda x: x.year) df_sm.head(5) df_debut = df_sm[["Group", "year_debut"]] df_debut.head(5) df_debut.info() df_debut["Group"].unique() df_debut = df_debut.drop_duplicates(subset="Group") # Timeline sm idols debut df_debut.sort_values(by="year_debut", ascending=True) # **Thai members** df_thai = df[df["Country"] == "Thailand"] df_thai sns.countplot(x="Company", data=df_thai) sns.countplot(x="Gender", data=df_thai) sns.countplot(y="year", data=df_thai)
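The group_count[:10,] and company_count[:10,] slices in the notebook above work, but Series.head(n) expresses the same top-N selection more clearly. A minimal, self-contained sketch of that value_counts-then-barplot pattern on a made-up stand-in frame (the example rows are illustrative, not taken from the Kaggle CSV):

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Made-up stand-in for the idol table; only the column used below matters.
df = pd.DataFrame({"Company": ["SM", "SM", "JYP", "YG", "SM", "JYP", "Cube"]})

# Count rows per company and keep the N most frequent.
top_companies = df["Company"].value_counts().head(3)

# Horizontal bar chart: categories on the y-axis, counts on the x-axis.
ax = sns.barplot(x=top_companies.values, y=top_companies.index)
ax.set_xlabel("Number of idols")
plt.show()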
false
1
1,435
2
1,565
1,435
129185327
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ## 1. Understand & Import the data import numpy as np import pandas as pd import matplotlib as plt import seaborn as sns from sklearn.preprocessing import LabelEncoder le = LabelEncoder() train_df = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv") test_df = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv") sub_df = pd.read_csv("/kaggle/input/spaceship-titanic/sample_submission.csv") train_df.head() test_df.head() sub_df.head() print(train_df.shape) print(test_df.shape) print(sub_df.shape) train_df.describe() train_df.info() # Passenger ID and Passenger Name does not add much value during model building hence, deleting columns from train dataset train_df.drop(columns=["PassengerId", "Name"], inplace=True) # ## 2. Data Preprocessing train_df.isnull().sum() # ## 2.1 Seperate the Numerical columns and Categorical columns cat_df = train_df.select_dtypes(exclude=["int", "float"]) from sklearn.impute import SimpleImputer imputer = SimpleImputer(strategy="most_frequent") cat_df_imputed = imputer.fit_transform(cat_df) cat_df_final = pd.DataFrame( cat_df_imputed, columns=["HomePlanet", "CryoSleep", "Cabin", "Destination", "VIP", "Transported"], ) num_df = train_df.select_dtypes(exclude=["object", "bool"]) from sklearn.impute import KNNImputer knnimputer = KNNImputer(n_neighbors=2) num_df_imputed = knnimputer.fit_transform(num_df) num_df_final = pd.DataFrame( num_df_imputed, columns=["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"], ) # Apply Label Encoding Techniques to Categorical Columns for col in cat_df_final.columns: cat_df_final[col] = le.fit_transform(cat_df_final[col]) cat_df_final.head(2) # ## 2.2 Merge Categorical and Numerical columns final_df = pd.concat([num_df_final, cat_df_final], axis=1) final_df.head(2) final_df.isnull().sum() # ## 3. 
Split the dataset into Training and Testing X = final_df.iloc[:, 0:11].values y = final_df["Transported"].values from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=12 ) # ## 4.1 Logistic Regression from sklearn.linear_model import LogisticRegression from sklearn.metrics import ( accuracy_score, confusion_matrix, ConfusionMatrixDisplay, f1_score, ) lr_model = LogisticRegression(max_iter=2000, verbose=0) lr_model.fit(X_train, y_train) y_preds_lr = lr_model.predict(X_test) lr_score = accuracy_score(y_test, y_preds_lr) print(f"Accuracy Score:{lr_score}") f1_lr = f1_score(y_test, y_preds_lr, average="weighted") print(f"F1 Score:{f1_lr}") # ## 4.2 Random Forest - Ensemble # Random Forest Classifier - Ensemble Learning from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=800, max_depth=10) rf_model = rf.fit(X_train, y_train) y_pred_rf = rf_model.predict(X_test) rf_score = accuracy_score(y_test, y_pred_rf) print(f"Accuracy Score:{rf_score}") f1_rf = f1_score(y_test, y_pred_rf, average="weighted") print(f"F1 Score:{f1_rf}") # ## 4.3 Decision Tree Model # Decision Tree Classifier from sklearn.model_selection import GridSearchCV param_grid = { "max_depth": [3, 5, 7], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 3], } from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier() CV = GridSearchCV(estimator=dt, param_grid=param_grid, cv=5) dt_model = CV.fit(X_train, y_train) y_pred_dt = dt_model.predict(X_test) dt_score = accuracy_score(y_test, y_pred_dt) print(f"Accuracy Score:{dt_score}") f1_dt = f1_score(y_test, y_pred_dt, average="weighted") print(f"F1 Score:{f1_dt}") # ## 4.4 Gradient Boosting Model from sklearn.ensemble import GradientBoostingClassifier gb = GradientBoostingClassifier( n_estimators=100, learning_rate=0.1, max_depth=3, random_state=42 ) gb_model = gb.fit(X_train, y_train) y_pred_gb = gb_model.predict(X_test) gb_score = accuracy_score(y_test, y_pred_gb) print(f"Accuracy Score:{gb_score}") f1_gb = f1_score(y_test, y_pred_gb, average="weighted") print(f"F1 Score:{f1_gb}") # ## 4.5 Confusion Matrix classes = ["Not Transported", "Transported"] ConfusionMatrixDisplay.from_predictions(y_test, y_pred_gb, display_labels=classes) plt.rcParams["figure.figsize"] = [6, 6] # ## 5. 
Prediction on Test DataFrame test_df.head() test_df.drop(columns=["PassengerId", "Name"], axis=1, inplace=True) test_df.isnull().sum() # Replace missing object values with imputer techniques cat_df = test_df.select_dtypes(exclude=["int", "float"]) from sklearn.impute import SimpleImputer imputer = SimpleImputer(strategy="most_frequent") cat_df_imputed = imputer.fit_transform(cat_df) cat_df_final = pd.DataFrame( cat_df_imputed, columns=["HomePlanet", "CryoSleep", "Cabin", "Destination", "VIP"] ) # Numerical Columns of Test Data num_df = test_df.select_dtypes(exclude=["object", "bool"]) from sklearn.impute import KNNImputer knnimputer = KNNImputer(n_neighbors=2) num_df_imputed = knnimputer.fit_transform(num_df) num_df_final = pd.DataFrame( num_df_imputed, columns=["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"], ) # Convert categorical datatypes to numerical for col in cat_df_final.columns: cat_df_final[col] = le.fit_transform(cat_df_final[col]) # Concat numerical and categorical columns in the same order as the training features final_test_df = pd.concat([num_df_final, cat_df_final], axis=1) # Use the Gradient Boosting model, since it achieved the highest accuracy during training y_pred = gb_model.predict(final_test_df) y_pred y_pred_final = y_pred.astype("bool") sub_df.head() y_pred_sub = y_pred_final.tolist() # ## 6. Submit final solution Submission = pd.DataFrame( {"PassengerId": sub_df["PassengerId"], "Transported": y_pred_final} ) Submission.head(5) Submission.to_csv("Submission.csv", index=False)
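One caveat with the block above: le.fit_transform is refit on each test column, so a category can be assigned a different integer at prediction time than it received during training. A minimal sketch of one way to keep the mappings consistent, fitting one encoder per column on the training frame and reusing it on the test frame (the tiny stand-in frames and the encoders dict are illustrative assumptions, not the author's pipeline):

import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Tiny stand-in frames; in the notebook these would be the imputed
# categorical columns of the train and test splits.
train_cat = pd.DataFrame({"HomePlanet": ["Earth", "Mars", "Europa"], "VIP": ["False", "True", "False"]})
test_cat = pd.DataFrame({"HomePlanet": ["Mars", "Earth", "Earth"], "VIP": ["True", "False", "False"]})

# Fit one encoder per column on the training data only...
encoders = {col: LabelEncoder().fit(train_cat[col]) for col in train_cat.columns}

# ...and reuse the fitted encoders on the test data, so the same category
# always maps to the same integer in both splits.
train_enc = train_cat.apply(lambda s: encoders[s.name].transform(s))
test_enc = test_cat.apply(lambda s: encoders[s.name].transform(s))
print(test_enc.head())

In practice, categories that appear only in the test split would also need handling, for example by mapping them to a dedicated "unknown" value before transforming.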
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/185/129185327.ipynb
null
null
[{"Id": 129185327, "ScriptId": 36546030, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11219290, "CreationDate": "05/11/2023 16:12:22", "VersionNumber": 4.0, "Title": "Passenger transported model 79.25% model accuracy", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 197.0, "LinesInsertedFromPrevious": 92.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 105.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ## 1. Understand & Import the data import numpy as np import pandas as pd import matplotlib as plt import seaborn as sns from sklearn.preprocessing import LabelEncoder le = LabelEncoder() train_df = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv") test_df = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv") sub_df = pd.read_csv("/kaggle/input/spaceship-titanic/sample_submission.csv") train_df.head() test_df.head() sub_df.head() print(train_df.shape) print(test_df.shape) print(sub_df.shape) train_df.describe() train_df.info() # Passenger ID and Passenger Name does not add much value during model building hence, deleting columns from train dataset train_df.drop(columns=["PassengerId", "Name"], inplace=True) # ## 2. Data Preprocessing train_df.isnull().sum() # ## 2.1 Seperate the Numerical columns and Categorical columns cat_df = train_df.select_dtypes(exclude=["int", "float"]) from sklearn.impute import SimpleImputer imputer = SimpleImputer(strategy="most_frequent") cat_df_imputed = imputer.fit_transform(cat_df) cat_df_final = pd.DataFrame( cat_df_imputed, columns=["HomePlanet", "CryoSleep", "Cabin", "Destination", "VIP", "Transported"], ) num_df = train_df.select_dtypes(exclude=["object", "bool"]) from sklearn.impute import KNNImputer knnimputer = KNNImputer(n_neighbors=2) num_df_imputed = knnimputer.fit_transform(num_df) num_df_final = pd.DataFrame( num_df_imputed, columns=["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"], ) # Apply Label Encoding Techniques to Categorical Columns for col in cat_df_final.columns: cat_df_final[col] = le.fit_transform(cat_df_final[col]) cat_df_final.head(2) # ## 2.2 Merge Categorical and Numerical columns final_df = pd.concat([num_df_final, cat_df_final], axis=1) final_df.head(2) final_df.isnull().sum() # ## 3. 
Split the dataset into Training and Testing X = final_df.iloc[:, 0:11].values y = final_df["Transported"].values from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=12 ) # ## 4.1 Logistic Regression from sklearn.linear_model import LogisticRegression from sklearn.metrics import ( accuracy_score, confusion_matrix, ConfusionMatrixDisplay, f1_score, ) lr_model = LogisticRegression(max_iter=2000, verbose=0) lr_model.fit(X_train, y_train) y_preds_lr = lr_model.predict(X_test) lr_score = accuracy_score(y_test, y_preds_lr) print(f"Accuracy Score:{lr_score}") f1_lr = f1_score(y_test, y_preds_lr, average="weighted") print(f"F1 Score:{f1_lr}") # ## 4.2 Random Forest - Ensemble # Random Forest Classifier -Ensemble Learning from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=800, max_depth=10) rf_model = rf.fit(X_train, y_train) y_pred_rf = rf_model.predict(X_test) rf_score = accuracy_score(y_test, y_pred_rf) print(f"Accuracy Score:{lr_score}") f1_rf = f1_score(y_test, y_pred_rf, average="weighted") print(f"F1 Score is:{f1_rf}") # ## 4.3 Decision Tree Model # Decision Tree Classifier from sklearn.model_selection import GridSearchCV param_grid = { "max_depth": [3, 5, 7], "min_samples_split": [2, 5, 10], "min_samples_leaf": [1, 2, 3], } from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier() CV = GridSearchCV(estimator=dt, param_grid=param_grid, cv=5) dt_model = CV.fit(X_train, y_train) y_pred_dt = dt_model.predict(X_test) dt_score = accuracy_score(y_test, y_pred_dt) print(f"Accuracy Score:{dt_score}") f1_dt = f1_score(y_test, y_pred_dt, average="weighted") print(f"F1 Score:{f1_dt}") # ## 4.4 Gradient Boosting Model from sklearn.ensemble import GradientBoostingClassifier gb = GradientBoostingClassifier( n_estimators=100, learning_rate=0.1, max_depth=3, random_state=42 ) gb_model = gb.fit(X_train, y_train) y_pred_gb = gb_model.predict(X_test) gb_score = accuracy_score(y_test, y_pred_gb) print(f"Accuracy Score:{gb_score}") f1_gb = f1_score(y_test, y_pred_gb, average="weighted") print(f"F1 Score:{f1_gb}") # ## 4.5 Confusion Matrix classes = ["Aggresive", "Normal"] ConfusionMatrixDisplay.from_predictions(y_test, y_pred_gb, display_labels=classes) plt.rcParams["figure.figsize"] = [6, 6] # ## 5. 
Prediction on Test DataFrame test_df.head() test_df.drop(columns=["PassengerId", "Name"], axis=1, inplace=True) test_df.isnull().sum() # Replace missing Object values with Imputer Techniques cat_df = test_df.select_dtypes(exclude=["int", "float"]) from sklearn.impute import SimpleImputer imputer = SimpleImputer(strategy="most_frequent") cat_df_imputed = imputer.fit_transform(cat_df) cat_df_final = pd.DataFrame( cat_df_imputed, columns=["HomePlanet", "CryoSleep", "Cabin", "Destination", "VIP"] ) # Numerical Columns of Test Data num_df = test_df.select_dtypes(exclude=["object", "bool"]) from sklearn.impute import KNNImputer knnimputer = KNNImputer(n_neighbors=2) num_df_imputed = knnimputer.fit_transform(num_df) num_df_final = pd.DataFrame( num_df_imputed, columns=["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"], ) # Covert Categorical datatypes to Numerical for col in cat_df_final.columns: cat_df_final[col] = le.fit_transform(cat_df_final[col]) # Concat categorical column and Numerical column final_test_df = pd.concat([cat_df_final, num_df_final], axis=1) # Hence, We used Gradient Boosting Model as accuracy achieved Highest during training y_pred = gb_model.predict(final_test_df) y_pred y_pred_final = y_pred.astype("bool") sub_df.head() y_pred_sub = y_pred_final.tolist() # ## 6. Submit final solution Submission = pd.DataFrame( {"PassengerId": sub_df["PassengerId"], "Transported": y_pred_final} ) Submission.head(5) Submission.to_csv("Submission.csv", index=False)
false
0
2,218
0
2,218
2,218
129185676
<jupyter_start><jupyter_text>cityscapes Kaggle dataset identifier: cityscapes <jupyter_script>import matplotlib.pyplot as plt import imageio # This is an example of an image and its label img = imageio.imread( "/kaggle/input/cityscapes/Cityspaces/images/train/aachen/aachen_000000_000019_leftImg8bit.png" ) mask = imageio.imread( "/kaggle/input/cityscapes/Cityspaces/gtFine/train/aachen/aachen_000000_000019_gtFine_labelIds.png" ) plt.figure() fig, axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(16, 7)) axs[0].imshow(img) axs[0].set_title("RGB Image", fontsize=20, fontweight="bold") axs[1].imshow(mask) axs[1].set_title("Ground Truth", fontsize=20, fontweight="bold") import os import numpy as np import random import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision from torchvision import transforms as T from torchvision.transforms import functional as TF from torch.utils.data import Dataset, DataLoader from PIL import Image from torchsummary import summary class CityscapesDataset(Dataset): def __init__(self, image_dir, label_dir, transforms=None): self.imagepaths = [] self.labelpaths = [] self.image_dir = image_dir self.label_dir = label_dir self.transforms = transforms dir = [os.path.join(image_dir, dir) for dir in sorted(os.listdir(image_dir))] for dir1 in sorted(dir): self.imagepaths.extend( [os.path.join(dir1, dir) for dir in sorted(os.listdir(dir1))] ) dir = [os.path.join(label_dir, dir) for dir in sorted(os.listdir(label_dir))] for dir1 in dir: labelpaths = [os.path.join(dir1, dir) for dir in sorted(os.listdir(dir1))] for img in labelpaths: if "labelIds" in os.path.basename(img): self.labelpaths.append(img) def __len__(self): return len(self.imagepaths) def __getitem__(self, idx): img = Image.open(self.imagepaths[idx]) mask = Image.open(self.labelpaths[idx]) if self.transforms: img, mask = self.transform(img, mask) img = TF.to_tensor(img) mask = TF.to_tensor(mask) return img, mask def transform(self, image, mask): # Random crop makes it possible to take a crop # of custom size from the image (for example 128x128) image = TF.resize(image, [128, 128]) mask = TF.resize(mask, [128, 128]) # random.random() produces a random variable between 0 and 1 # with a uniform distribution. 
# It is used to apply a horizontal flip to the crop # with probability 0.5 if random.random() > 0.5: image = TF.hflip(image) mask = TF.hflip(mask) return image, mask class DoubleConv(nn.Module): def __init__(self, in_channels=3, out_channels=64): super(DoubleConv, self).__init__() self.doubleconv = nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), ) def forward(self, x): return self.doubleconv(x) class UNet(nn.Module): def __init__(self, in_channels=3, out_classes=19, features=[64, 128, 256, 512]): super(UNet, self).__init__() self.ups = nn.ModuleList() self.downs = nn.ModuleList() self.pool = nn.MaxPool2d(kernel_size=2, stride=2) for feature in features: self.downs.append(DoubleConv(in_channels, feature)) in_channels = feature for feature in reversed(features): self.ups.append( nn.ConvTranspose2d(2 * feature, feature, kernel_size=2, stride=2) ) self.ups.append(DoubleConv(feature * 2, feature)) self.bootleneck = DoubleConv(features[-1], features[-1] * 2) self.final_conv = nn.Conv2d(features[0], out_classes, kernel_size=1) def forward(self, x): skip_connections = [] for down in self.downs: x = down(x) skip_connections.append(x) x = self.pool(x) x = self.bootleneck(x) skip_connections = skip_connections[::-1] for idx in range(0, len(self.ups), 2): x = self.ups[idx](x) skip_connection = skip_connections[idx // 2] if x.shape != skip_connection.shape: x = TF.resize(x, size=skip_connection.shape[2:]) concat_skip = torch.cat((skip_connection, x), dim=1) x = self.ups[idx + 1](concat_skip) x = self.final_conv(x) return x # device = "cuda" if torch.cuda.is_available() else "cpu" # model = UNet(3,34) # model=model.to(device) # print(summary(model,(3,128,128))) # testing dimension def test(): x = torch.randn((1, 3, 256, 256)) print(x.shape) model = UNet(3, 34) preds = model(x) print(preds.shape) test() # Define the number of epochs and the learning rate epochs = 5 learning_rate = 0.1 model = UNet(3, 34) # and the device on which to run the code ('cuda' or 'cpu') device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Define the loss function, which can be custom or taken from the nn package batch_size = 1 model = model.to(device) loss_function = nn.CrossEntropyLoss(ignore_index=255) loss_function = loss_function.to(device) # define the optimizer optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9) # Get train and val dataset instances train_dir = "/kaggle/input/cityscapes/Cityspaces/images/train" label_train_dir = "/kaggle/input/cityscapes/Cityspaces/gtFine/train" val_dir = "/kaggle/input/cityscapes/Cityspaces/images/val" label_val_dir = "/kaggle/input/cityscapes/Cityspaces/gtFine/val" train_dataset = CityscapesDataset( image_dir=train_dir, label_dir=label_train_dir, transforms=True ) valid_dataset = CityscapesDataset( image_dir=val_dir, label_dir=label_val_dir, transforms=True ) train_dataset, test_dataset = torch.utils.data.random_split(train_dataset, [0.7, 0.3]) # train_dataset.__getitem__(3) # Get train and val data loaders test_loader = DataLoader(test_dataset) train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) val_loader = DataLoader(valid_dataset, batch_size=batch_size) def jaccard_loss(true, logits, eps=1e-7): """Computes the Jaccard loss, a.k.a the IoU loss. Note that PyTorch optimizers minimize a loss. 
In this case we want to maximize the Jaccard index, so we return one minus the Jaccard index as the loss to minimize. Args: true: a tensor of shape [B, H, W] or [B, 1, H, W]. logits: a tensor of shape [B, C, H, W]. Corresponds to the raw output or logits of the model. eps: added to the denominator for numerical stability. Returns: jacc_loss: the Jaccard loss. """ num_classes = logits.shape[1] if num_classes == 1: true_1_hot = torch.eye(num_classes + 1).to(device)[true.squeeze(1)] true_1_hot = true_1_hot.permute(0, 3, 1, 2).float() true_1_hot_f = true_1_hot[:, 0:1, :, :] true_1_hot_s = true_1_hot[:, 1:2, :, :] true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1) pos_prob = torch.sigmoid(logits) neg_prob = 1 - pos_prob probas = torch.cat([pos_prob, neg_prob], dim=1) else: true_1_hot = torch.eye(num_classes).to(device)[true.squeeze(1)] true_1_hot = true_1_hot.permute(0, 3, 1, 2).float() probas = F.softmax(logits, dim=1) true_1_hot = true_1_hot.type(logits.type()) dims = (0,) + tuple(range(2, true.ndimension())) intersection = torch.sum(probas * true_1_hot, dims) cardinality = torch.sum(probas + true_1_hot, dims) union = cardinality - intersection jacc_loss = (intersection / (union + eps)).mean() return 1 - jacc_loss # len(train_loader) # for data, target in train_loader: # print(data.shape) import time # the IoU computation still needs to be fixed def train(epochs, model, train_loader, val_loader, loss, optimizer, patch=False): # torch.cuda.empty_cache() train_losses = [] test_losses = [] val_loss = [] val_iou = [] train_iou = [] min_loss = np.inf model.to(device) fit_time = time.time() for e in range(epochs): print("Starting epoch:", e + 1) since = time.time() running_loss = 0 iou_score = 0 # training loop model.train() for data, target in train_loader: # print('h') # training phase data = data.to(device) target = target.to(device) optimizer.zero_grad() # forward mask = target output = model(data) mask = torch.argmax(target, dim=1) # Loss and IoU evaluation loss = loss_function(output, mask) iou_score += jaccard_loss(mask, output).item() # backward loss.backward() optimizer.step() running_loss += loss.item() model.eval() val_loss = 0 val_iou_score = 0 # validation loop with torch.no_grad(): for data, target in val_loader: output = model(data.to(device)) target = target.to(device) mask = torch.argmax(target, dim=1) val_loss += F.cross_entropy(output, mask, reduction="sum").item() # Loss and IoU evaluation val_iou_score += jaccard_loss(target, output) loss = loss_function(output, mask) val_loss += loss.item() # The loss is stored at every epoch for both training and validation train_losses.append(running_loss / len(train_loader)) test_losses.append(val_loss / len(val_loader)) # The model is saved only if the validation loss is better than in the previous steps. if min_loss > (val_loss / len(val_loader)): print( "Loss Decreasing.. 
{:.3f} >> {:.3f} ".format( min_loss, (val_loss / len(val_loader)) ) ) min_loss = val_loss / len(val_loader) print("saving model...") torch.save(model.state_dict(), "UNet_best.pt") # The IoU is stored at every epoch for both training and validation val_iou.append(val_iou_score / len(val_loader)) train_iou.append(iou_score / len(train_loader)) print( "Epoch:{}/{}..".format(e + 1, epochs), "Train Loss: {:.3f}..".format(running_loss / len(train_loader)), "Val Loss: {:.3f}..".format(val_loss / len(val_loader)), "Train mIoU:{:.3f}..".format(iou_score / len(train_loader)), "Val mIoU: {:.3f}..".format(val_iou_score / len(val_loader)), "Time: {:.2f}m".format((time.time() - since) / 60), ) history = { "train_loss": train_losses, "val_loss": test_losses, "train_miou": train_iou, "val_miou": val_iou, } print("Total time: {:.2f} m".format((time.time() - fit_time) / 60)) print("saving model...") torch.save(model, "UNet_last.pt") return history # history = train(epochs, model, train_loader, val_loader, loss_function, optimizer) random_idx = random.randint(0, len(train_dataset) - 1) image, mask = train_dataset[random_idx] with torch.no_grad(): for data, target in train_loader: image = data mask = target output = model(data.to(device)) # not sure how to visualize the image, since the output image has 19 bands output = torch.argmax(output, dim=1) im = image[0].cpu().numpy() im = np.moveaxis(im, 0, -1) ma = mask[0].cpu().numpy() ma = np.squeeze(np.moveaxis(ma, 0, -1)) out = output[0].cpu().numpy() out = np.squeeze(np.moveaxis(out, 0, -1)) plt.figure() plt.subplot(131) plt.imshow(im) plt.subplot(132) plt.imshow(ma) plt.subplot(133) plt.imshow(out)
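A note on the mask handling in the notebook above: TF.to_tensor rescales the labelIds image to floats in [0, 1], and torch.argmax(target, dim=1) on a single-channel mask always yields zeros, so CrossEntropyLoss never sees the real class ids. A minimal sketch of one way to load the mask as integer class indices instead, assuming the gtFine labelIds values are used directly as classes (which the CrossEntropyLoss(ignore_index=255) setting suggests); the function name is illustrative:

import numpy as np
import torch
from PIL import Image
from torchvision.transforms import functional as TF
from torchvision.transforms import InterpolationMode

def load_mask_as_class_ids(path, size=(128, 128)):
    # Resize with NEAREST so label ids are not blended into new values.
    mask = Image.open(path)
    mask = TF.resize(mask, list(size), interpolation=InterpolationMode.NEAREST)
    # Keep the raw integer ids instead of to_tensor's [0, 1] float scaling:
    # CrossEntropyLoss expects a LongTensor of shape [H, W] with class indices.
    return torch.from_numpy(np.array(mask)).long()

With masks loaded this way, a batch of targets has shape [B, H, W] and can be passed to loss_function(output, target) directly, without the argmax step.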
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/185/129185676.ipynb
cityscapes
xiaose
[{"Id": 129185676, "ScriptId": 38398435, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12227462, "CreationDate": "05/11/2023 16:15:01", "VersionNumber": 2.0, "Title": "Fork of IPCV-CityScapes", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 363.0, "LinesInsertedFromPrevious": 6.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 357.0, "LinesInsertedFromFork": 6.0, "LinesDeletedFromFork": 6.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 357.0, "TotalVotes": 0}]
[{"Id": 185008798, "KernelVersionId": 129185676, "SourceDatasetVersionId": 1144795}]
[{"Id": 1144795, "DatasetId": 645942, "DatasourceVersionId": 1175461, "CreatorUserId": 4679475, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "05/10/2020 11:27:21", "VersionNumber": 1.0, "Title": "cityscapes", "Slug": "cityscapes", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 645942, "CreatorUserId": 4679475, "OwnerUserId": 4679475.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1144795.0, "CurrentDatasourceVersionId": 1175461.0, "ForumId": 660276, "Type": 2, "CreationDate": "05/10/2020 11:27:21", "LastActivityDate": "05/10/2020", "TotalViews": 8579, "TotalDownloads": 1751, "TotalVotes": 26, "TotalKernels": 30}]
[{"Id": 4679475, "UserName": "xiaose", "DisplayName": "xiaose", "RegisterDate": "03/16/2020", "PerformanceTier": 0}]
import matplotlib.pyplot as plt import imageio # Questo è un esempio di un'immagine e la sua label img = imageio.imread( "/kaggle/input/cityscapes/Cityspaces/images/train/aachen/aachen_000000_000019_leftImg8bit.png" ) mask = imageio.imread( "/kaggle/input/cityscapes/Cityspaces/gtFine/train/aachen/aachen_000000_000019_gtFine_labelIds.png" ) plt.figure() fig, axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(16, 7)) axs[0].imshow(img) axs[0].set_title("Immagine RGB", fontsize=20, fontweight="bold") axs[1].imshow(mask) axs[1].set_title("Ground Truth", fontsize=20, fontweight="bold") import os import numpy as np import random import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision from torchvision import transforms as T from torchvision.transforms import functional as TF from torch.utils.data import Dataset, DataLoader from PIL import Image from torchsummary import summary class CityscapesDataset(Dataset): def __init__(self, image_dir, label_dir, transforms=None): self.imagepaths = [] self.labelpaths = [] self.image_dir = image_dir self.label_dir = label_dir self.transforms = transforms dir = [os.path.join(image_dir, dir) for dir in sorted(os.listdir(image_dir))] for dir1 in sorted(dir): self.imagepaths.extend( [os.path.join(dir1, dir) for dir in sorted(os.listdir(dir1))] ) dir = [os.path.join(label_dir, dir) for dir in sorted(os.listdir(label_dir))] for dir1 in dir: labelpaths = [os.path.join(dir1, dir) for dir in sorted(os.listdir(dir1))] for img in labelpaths: if "labelIds" in os.path.basename(img): self.labelpaths.append(img) def __len__(self): return len(self.imagepaths) def __getitem__(self, idx): img = Image.open(self.imagepaths[idx]) mask = Image.open(self.labelpaths[idx]) if self.transforms: img, mask = self.transform(img, mask) img = TF.to_tensor(img) mask = TF.to_tensor(mask) return img, mask def transform(self, image, mask): # Random crop permette di acquisire un ritaglio # di dimensioni custom dall'immagine (per esempio 128x128) image = TF.resize(image, [128, 128]) mask = TF.resize(mask, [128, 128]) # random.random() produce una variabile aleatoria compresa tra 0 e 1 # con distribuzione uniforme. 
# Serve per effettuare un flip orizzontale sul ritaglio # con probabilità 0.5 if random.random() > 0.5: image = TF.hflip(image) mask = TF.hflip(mask) return image, mask class DoubleConv(nn.Module): def __init__(self, in_channels=3, out_channels=64): super(DoubleConv, self).__init__() self.doubleconv = nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), ) def forward(self, x): return self.doubleconv(x) class UNet(nn.Module): def __init__(self, in_channels=3, out_classes=19, features=[64, 128, 256, 512]): super(UNet, self).__init__() self.ups = nn.ModuleList() self.downs = nn.ModuleList() self.pool = nn.MaxPool2d(kernel_size=2, stride=2) for feature in features: self.downs.append(DoubleConv(in_channels, feature)) in_channels = feature for feature in reversed(features): self.ups.append( nn.ConvTranspose2d(2 * feature, feature, kernel_size=2, stride=2) ) self.ups.append(DoubleConv(feature * 2, feature)) self.bootleneck = DoubleConv(features[-1], features[-1] * 2) self.final_conv = nn.Conv2d(features[0], out_classes, kernel_size=1) def forward(self, x): skip_connections = [] for down in self.downs: x = down(x) skip_connections.append(x) x = self.pool(x) x = self.bootleneck(x) skip_connections = skip_connections[::-1] for idx in range(0, len(self.ups), 2): x = self.ups[idx](x) skip_connection = skip_connections[idx // 2] if x.shape != skip_connection.shape: x = TF.resize(x, size=skip_connection.shape[2:]) concat_skip = torch.cat((skip_connection, x), dim=1) x = self.ups[idx + 1](concat_skip) x = self.final_conv(x) return x # device = "cuda" if torch.cuda.is_available() else "cpu" # model = UNet(3,34) # model=model.to(device) # print(summary(model,(3,128,128))) # testing dimension def test(): x = torch.randn((1, 3, 256, 256)) print(x.shape) model = UNet(3, 34) preds = model(x) print(preds.shape) test() # Definisci il numero di epoche e il learning rate epochs = 5 learning_rate = 0.1 model = UNet(3, 34) # e il dispositivo su cui far girare il codice ('cuda' o 'cpu') device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Definire la loss function, che può essere custom o presa dal pacchetto nn batch_size = 1 model = model.to(device) loss_function = nn.CrossEntropyLoss(ignore_index=255) loss_function = loss_function.to(device) # definire l'ottimizzatore optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9) # Get train and val dataset instances train_dir = "/kaggle/input/cityscapes/Cityspaces/images/train" label_train_dir = "/kaggle/input/cityscapes/Cityspaces/gtFine/train" val_dir = "/kaggle/input/cityscapes/Cityspaces/images/val" label_val_dir = "/kaggle/input/cityscapes/Cityspaces/gtFine/val" train_dataset = CityscapesDataset( image_dir=train_dir, label_dir=label_train_dir, transforms=True ) valid_dataset = CityscapesDataset( image_dir=val_dir, label_dir=label_val_dir, transforms=True ) train_dataset, test_dataset = torch.utils.data.random_split(train_dataset, [0.7, 0.3]) # train_dataset.__getitem__(3) # Get train and val data loaders test_loader = DataLoader(test_dataset) train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) val_loader = DataLoader(valid_dataset, batch_size=batch_size) def jaccard_loss(true, logits, eps=1e-7): """Computes the Jaccard loss, a.k.a the IoU loss. Note that PyTorch optimizers minimize a loss. 
In this case, we would like to maximize the jaccard loss so we return the negated jaccard loss. Args: true: a tensor of shape [B, H, W] or [B, 1, H, W]. logits: a tensor of shape [B, C, H, W]. Corresponds to the raw output or logits of the model. eps: added to the denominator for numerical stability. Returns: jacc_loss: the Jaccard loss. """ num_classes = logits.shape[1] if num_classes == 1: true_1_hot = torch.eye(num_classes + 1).to(device)[true.squeeze(1)] true_1_hot = true_1_hot.permute(0, 3, 1, 2).float() true_1_hot_f = true_1_hot[:, 0:1, :, :] true_1_hot_s = true_1_hot[:, 1:2, :, :] true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1) pos_prob = torch.sigmoid(logits) neg_prob = 1 - pos_prob probas = torch.cat([pos_prob, neg_prob], dim=1) else: true_1_hot = torch.eye(num_classes).to(device)[true.squeeze(1)] true_1_hot = true_1_hot.permute(0, 3, 1, 2).float() probas = F.softmax(logits, dim=1) true_1_hot = true_1_hot.type(logits.type()) dims = (0,) + tuple(range(2, true.ndimension())) intersection = torch.sum(probas * true_1_hot, dims) cardinality = torch.sum(probas + true_1_hot, dims) union = cardinality - intersection jacc_loss = (intersection / (union + eps)).mean() return 1 - jacc_loss # len(train_loader) # for data, target in train_loader: # print(data.shape) import time # si deve aggiustare IoU def train(epochs, model, train_loader, val_loader, loss, optimizer, patch=False): # torch.cuda.empty_cache() train_losses = [] test_losses = [] val_loss = [] val_iou = [] train_iou = [] min_loss = np.inf model.to(device) fit_time = time.time() for e in range(epochs): print("Starting epoch:", e + 1) since = time.time() running_loss = 0 iou_score = 0 # training loop model.train() for data, target in train_loader: # print('h') # training phase data = data.to(device) target = target.to(device) optimizer.zero_grad() # forward mask = target output = model(data) mask = torch.argmax(target, dim=1) # Loss and IoU evaluation loss = loss_function(output, mask) iou_score += jaccard_loss(mask, output).item() # backward loss.backward() optimizer.step() running_loss += loss.item() model.eval() val_loss = 0 val_iou_score = 0 # validation loop with torch.no_grad(): for data, target in val_loader: output = model(data.to(device)) target = target.to(device) mask = torch.argmax(target, dim=1) val_loss += F.cross_entropy(output, mask, reduction="sum").item() # Loss and IoU evaluation val_iou_score += jaccard_loss(target, output) loss = loss_function(output, mask) val_loss += loss.item() # Viene salvata la loss ad ogni epoca sia per il training che per il validation train_losses.append(running_loss / len(train_loader)) test_losses.append(val_loss / len(val_loader)) # Viene salvato il modello solo se la validation loss è migliore degli step precedenti. if min_loss > (val_loss / len(val_loader)): print( "Loss Decreasing.. 
{:.3f} >> {:.3f} ".format( min_loss, (val_loss / len(val_loader)) ) ) min_loss = val_loss / len(val_loader) print("saving model...") torch.save(model.state_dict(), "UNet_best.pt") # Viene salvata la IoU ad ogni epoca sia per il training che per il validation val_iou.append(val_iou_score / len(val_loader)) train_iou.append(iou_score / len(train_loader)) print( "Epoch:{}/{}..".format(e + 1, epochs), "Train Loss: {:.3f}..".format(running_loss / len(train_loader)), "Val Loss: {:.3f}..".format(val_loss / len(val_loader)), "Train mIoU:{:.3f}..".format(iou_score / len(train_loader)), "Val mIoU: {:.3f}..".format(val_iou_score / len(val_loader)), "Time: {:.2f}m".format((time.time() - since) / 60), ) history = { "train_loss": train_losses, "val_loss": test_losses, "train_miou": train_iou, "val_miou": val_iou, } print("Total time: {:.2f} m".format((time.time() - fit_time) / 60)) print("saving model...") torch.save(model, "UNet_last.pt") return history # history = train(epochs, model, train_loader, val_loader, loss_function, optimizer) random_idx = random.randint(0, len(train_dataset) - 1) image, mask = train_dataset[random_idx] with torch.no_grad(): for data, target in train_loader: image = data mask = target output = model(data.to(device)) # non so come visualizzare l'immagine dato che l'immagine in uscita ha 19 bande output = torch.argmax(output, dim=1) im = image[0].cpu().numpy() im = np.moveaxis(im, 0, -1) ma = mask[0].cpu().numpy() ma = np.squeeze(np.moveaxis(ma, 0, -1)) out = output[0].cpu().numpy() out = np.squeeze(np.moveaxis(out, 0, -1)) plt.figure() plt.subplot(131) plt.imshow(im) plt.subplot(132) plt.imshow(ma) plt.subplot(133) plt.imshow(out)
false
0
3,783
0
3,803
3,783
129185386
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Import Matplotlib import matplotlib.pyplot as plt x = np.linspace(0, 5, 11) y = x**2 x y # # Functional plt.plot(x, y) plt.xlabel("X Label") plt.ylabel("Y Label") plt.title("Title") plt.subplot(1, 2, 1) plt.plot(x, y, "r") plt.subplot(1, 2, 2) plt.plot(y, x, "b") # # Object Oriented fig = plt.figure() axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) axes.plot(x, y) axes.set_xlabel("X Label") axes.set_ylabel("Y Label") axes.set_title("Title") fig1 = plt.figure() axes1 = fig1.add_axes([0.1, 0.1, 0.8, 0.8]) axes2 = fig1.add_axes([0.2, 0.5, 0.4, 0.3]) axes1.plot(x, y) axes1.set_title("Outer") axes2.plot(y, x) axes2.set_title("Inner") # # Subplots fig, axes = plt.subplots(nrows=1, ncols=2) axes[0].plot(x, y, "b") axes[0].set_title("Left") axes[1].plot(y, x, "r") axes[1].set_title("Right") # fixes overlaps on plots plt.tight_layout() # # Figure Size and DPI fig = plt.figure(figsize=(3, 2)) ax = fig.add_axes([0, 0, 1, 1]) ax.plot(x, y) fig, axes = plt.subplots(figsize=(3, 5)) axes.plot(x, y) fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(3, 5)) axes[0].plot(x, y) axes[1].plot(y, x) # fixes overlaps on plots plt.tight_layout() fig3 = plt.figure() ax = fig3.add_axes([0, 0, 0.5, 0.5]) ax.plot(x, x**2, label="X^2") ax.plot(x, x**3, label="X^3") ax.legend(loc="center") # # Save figure to file fig.savefig("my_pic.png", dpi=200) # # Plot Appearance fig4 = plt.figure() ax = fig4.add_axes([0, 0, 0.5, 0.75]) # alpha = transparancy # lw = line width # ls = line style # marker = mark up x,y on the plot ax.plot( x, y, color="purple", lw=2, ls="-.", alpha=0.5, marker="o", markersize=10, markerfacecolor="yellow", markeredgecolor="green", )
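One small addition to the fig.savefig("my_pic.png", dpi=200) call above: with manually placed axes such as fig.add_axes([0, 0, 1, 1]), axis labels can fall outside the saved area, and bbox_inches="tight" expands the bounding box to include them. A short sketch (the filename is arbitrary):

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 5, 11)
fig = plt.figure(figsize=(3, 2))
ax = fig.add_axes([0, 0, 1, 1])  # axes fill the figure, so labels can spill outside it
ax.plot(x, x**2)
ax.set_xlabel("X Label")
# bbox_inches="tight" grows the saved area to include labels that would
# otherwise be clipped by the manually placed axes.
fig.savefig("my_pic_tight.png", dpi=200, bbox_inches="tight")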
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/185/129185386.ipynb
null
null
[{"Id": 129185386, "ScriptId": 38403592, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14559352, "CreationDate": "05/11/2023 16:12:51", "VersionNumber": 1.0, "Title": "Matplotlib Examples", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 122.0, "LinesInsertedFromPrevious": 122.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Import Matplotlib import matplotlib.pyplot as plt x = np.linspace(0, 5, 11) y = x**2 x y # # Functional plt.plot(x, y) plt.xlabel("X Label") plt.ylabel("Y Label") plt.title("Title") plt.subplot(1, 2, 1) plt.plot(x, y, "r") plt.subplot(1, 2, 2) plt.plot(y, x, "b") # # Object Oriented fig = plt.figure() axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) axes.plot(x, y) axes.set_xlabel("X Label") axes.set_ylabel("Y Label") axes.set_title("Title") fig1 = plt.figure() axes1 = fig1.add_axes([0.1, 0.1, 0.8, 0.8]) axes2 = fig1.add_axes([0.2, 0.5, 0.4, 0.3]) axes1.plot(x, y) axes1.set_title("Outer") axes2.plot(y, x) axes2.set_title("Inner") # # Subplots fig, axes = plt.subplots(nrows=1, ncols=2) axes[0].plot(x, y, "b") axes[0].set_title("Left") axes[1].plot(y, x, "r") axes[1].set_title("Right") # fixes overlaps on plots plt.tight_layout() # # Figure Size and DPI fig = plt.figure(figsize=(3, 2)) ax = fig.add_axes([0, 0, 1, 1]) ax.plot(x, y) fig, axes = plt.subplots(figsize=(3, 5)) axes.plot(x, y) fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(3, 5)) axes[0].plot(x, y) axes[1].plot(y, x) # fixes overlaps on plots plt.tight_layout() fig3 = plt.figure() ax = fig3.add_axes([0, 0, 0.5, 0.5]) ax.plot(x, x**2, label="X^2") ax.plot(x, x**3, label="X^3") ax.legend(loc="center") # # Save figure to file fig.savefig("my_pic.png", dpi=200) # # Plot Appearance fig4 = plt.figure() ax = fig4.add_axes([0, 0, 0.5, 0.75]) # alpha = transparancy # lw = line width # ls = line style # marker = mark up x,y on the plot ax.plot( x, y, color="purple", lw=2, ls="-.", alpha=0.5, marker="o", markersize=10, markerfacecolor="yellow", markeredgecolor="green", )
false
0
955
2
955
955
129011073
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import gc import time from contextlib import contextmanager import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import seaborn as sns import warnings warnings.simplefilter(action="ignore", category=FutureWarning) import re import warnings warnings.filterwarnings("ignore") from sklearn.preprocessing import LabelEncoder, MinMaxScaler, OneHotEncoder from sklearn.metrics import roc_auc_score, roc_curve from sklearn.model_selection import KFold, StratifiedKFold from sklearn.impute import SimpleImputer from lightgbm import LGBMClassifier # basic Logistic regression - on traning set from sklearn.model_selection import train_test_split import featuretools as ft import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import roc_auc_score, confusion_matrix from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import PolynomialFeatures # Import required libraries/packages import warnings warnings.filterwarnings("ignore") import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from timeit import default_timer as timer import os import random import csv import json import itertools import pprint # from pydash import at import gc import re # import featuretools for automated feature engineering import featuretools as ft from featuretools import selection # Import sklearn helper metrics and transformations from sklearn.base import TransformerMixin from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split, KFold from sklearn.utils import resample from sklearn.metrics import ( confusion_matrix, accuracy_score, precision_score, recall_score, roc_auc_score, classification_report, roc_curve, auc, f1_score, ) # Import models from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.naive_bayes import GaussianNB from xgboost import XGBClassifier from sklearn.ensemble import GradientBoostingClassifier import lightgbm as lgb # import library for hyperparameter optimization from hyperopt import STATUS_OK from hyperopt import hp, tpe, Trials, fmin from hyperopt.pyll.stochastic import sample import os def reduce_mem_usage(df): numerics = ["int16", "int32", "int64", "float16", "float32", "float64"] for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == "int": if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif 
c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if ( c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max ): df[col] = df[col].astype(np.float16) elif ( c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max ): df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) return df def get_balance_data(): default_dir = "../input/home-credit-default-risk/" pos_dtype = { "SK_ID_PREV": np.uint32, "SK_ID_CURR": np.uint32, "MONTHS_BALANCE": np.int32, "SK_DPD": np.int32, "SK_DPD_DEF": np.int32, "CNT_INSTALMENT": np.float32, "CNT_INSTALMENT_FUTURE": np.float32, } install_dtype = { "SK_ID_PREV": np.uint32, "SK_ID_CURR": np.uint32, "NUM_INSTALMENT_NUMBER": np.int32, "NUM_INSTALMENT_VERSION": np.float32, "DAYS_INSTALMENT": np.float32, "DAYS_ENTRY_PAYMENT": np.float32, "AMT_INSTALMENT": np.float32, "AMT_PAYMENT": np.float32, } card_dtype = { "SK_ID_PREV": np.uint32, "SK_ID_CURR": np.uint32, "MONTHS_BALANCE": np.int16, "AMT_CREDIT_LIMIT_ACTUAL": np.int32, "CNT_DRAWINGS_CURRENT": np.int32, "SK_DPD": np.int32, "SK_DPD_DEF": np.int32, "AMT_BALANCE": np.float32, "AMT_DRAWINGS_ATM_CURRENT": np.float32, "AMT_DRAWINGS_CURRENT": np.float32, "AMT_DRAWINGS_OTHER_CURRENT": np.float32, "AMT_DRAWINGS_POS_CURRENT": np.float32, "AMT_INST_MIN_REGULARITY": np.float32, "AMT_PAYMENT_CURRENT": np.float32, "AMT_PAYMENT_TOTAL_CURRENT": np.float32, "AMT_RECEIVABLE_PRINCIPAL": np.float32, "AMT_RECIVABLE": np.float32, "AMT_TOTAL_RECEIVABLE": np.float32, "CNT_DRAWINGS_ATM_CURRENT": np.float32, "CNT_DRAWINGS_OTHER_CURRENT": np.float32, "CNT_DRAWINGS_POS_CURRENT": np.float32, "CNT_INSTALMENT_MATURE_CUM": np.float32, } pos_bal = pd.read_csv( os.path.join(default_dir, "POS_CASH_balance.csv"), dtype=pos_dtype ) install = pd.read_csv( os.path.join(default_dir, "installments_payments.csv"), dtype=install_dtype ) card_bal = pd.read_csv( os.path.join(default_dir, "credit_card_balance.csv"), dtype=card_dtype ) return pos_bal, install, card_bal def get_dataset(): default_dir = "../input/home-credit-default-risk/" app_train = pd.read_csv(os.path.join(default_dir, "application_train.csv")) # app_train = reduce_mem_usage(app_train) app_test = pd.read_csv(os.path.join(default_dir, "application_test.csv")) # app_test = reduce_mem_usage(app_test) apps = pd.concat([app_train, app_test]) prev = pd.read_csv(os.path.join(default_dir, "previous_application.csv")) prev = reduce_mem_usage(prev) bureau = pd.read_csv(os.path.join(default_dir, "bureau.csv")) bureau = reduce_mem_usage(bureau) bureau_bal = pd.read_csv(os.path.join(default_dir, "bureau_balance.csv")) bureau_bal = reduce_mem_usage(bureau_bal) pos_bal, install, card_bal = get_balance_data() return app_train, apps, prev, bureau, bureau_bal, pos_bal, install, card_bal app_train, apps, prev, bureau, bureau_bal, pos_bal, install, card_bal = get_dataset() df_train = app_train.copy() df_train_test = apps.copy() base_bureau_up = bureau.copy() base_previous_application_up = prev.copy() base_POS_CASH_balance_up = pos_bal.copy() base_bureau_balance_up = bureau_bal.copy() base_credit_card_balance_up = card_bal.copy() base_installments_payments_up = install.copy() ## function for checking the data shape def check_df(dfs): for i, (df, name) in enumerate(dfs): print(f"{name}: {df.shape}") dfs = [ (df_train_test, "df_train_test"), (base_bureau_up, "base_bureau_up"), (base_previous_application_up, "base_previous_application_up"), (base_POS_CASH_balance_up, "base_POS_CASH_balance_up"), 
(base_bureau_balance_up, "base_bureau_balance_up"), (base_credit_card_balance_up, "base_credit_card_balance_up"), (base_installments_payments_up, "base_installments_payments_up"), ] check_df(dfs) dfs = [ (df_train_test, "df_train_test"), (base_bureau_up, "base_bureau_up"), (base_previous_application_up, "base_previous_application_up"), (base_POS_CASH_balance_up, "base_POS_CASH_balance_up"), (base_bureau_balance_up, "base_bureau_balance_up"), (base_credit_card_balance_up, "base_credit_card_balance_up"), (base_installments_payments_up, "base_installments_payments_up"), ] def check_head(dfs): for df, name in dfs: print(name) display(df.head(3)) check_head(dfs) # **2. Exploratory data analysis** print(df_train["TARGET"].nunique()) def missing_values_table(df): if isinstance(df, pd.Series): df = pd.DataFrame(df) missing_values_dict = {} na_columns = df.columns[df.isnull().any()].tolist() n_miss = df[na_columns].isnull().sum().sort_values(ascending=False) ratio = (n_miss / df.shape[0] * 100).sort_values(ascending=False) missing_df = pd.concat( [n_miss, np.round(ratio, 2)], axis=1, keys=["Missing Values", "Percentage"] ) if isinstance(df, pd.Series): missing_values_dict[df.name] = missing_df else: missing_values_dict["DataFrame"] = missing_df return missing_values_dict missing_values_table(df_train_test["TARGET"]) df_train["TARGET"].astype(int).plot.hist() # Find correlations with the target and sort numeric_cols = df_train_test.select_dtypes(include=[np.number]).columns correlations = df_train_test[numeric_cols].corr()["TARGET"].sort_values() # Display correlations print("Most Positive Correlations:\n", correlations.tail(15)) print("\nMost Negative Correlations:\n", correlations.head(15)) # # Identify the categorical variable def identify_columns(df, threshold=25): # Identify categorical object columns, categorical numerical columns, and non-categorical columns cat_object_list = [ i for i in df.columns if df[i].dtype == "object" and df[i].nunique() < threshold ] cat_num_list = [ i for i in df.columns if df[i].dtype in ["int64", "float64"] and df[i].nunique() < threshold ] non_cat_list = [ i for i in df.columns if i not in cat_object_list and i not in cat_num_list ] # Identify object columns and numerical columns in non-categorical columns mix_serise_col = df[non_cat_list] non_cat_obj = [ i for i in mix_serise_col.columns if mix_serise_col[i].dtype == "object" ] non_cat_num = [ i for i in mix_serise_col.columns if mix_serise_col[i].dtype in ["int64", "float64"] ] # #Print the results # print('Categorical object columns:', len(cat_object_list)) # print('Categorical numerical columns:', len(cat_num_list)) # print('Non-categorical columns:', len(non_cat_list)) # print('Object columns in non-categorical columns:', len(non_cat_obj)) # print('Numerical columns in non-categorical columns:', len(non_cat_num)) # Return the results as a dictionary results = { "cat_object_list": cat_object_list, "cat_num_list": cat_num_list, "non_cat_list": non_cat_list, "non_cat_obj": non_cat_obj, "non_cat_num": non_cat_num, } return results results = identify_columns(df_train_test) categorical_num = identify_columns(df_train_test)["cat_num_list"] categorical_obj = identify_columns(df_train_test)["cat_object_list"] non_categorical_obj = identify_columns(df_train_test)["non_cat_obj"] print(categorical_obj) print(categorical_num) def plot_categorical_feature(feature, df=None, orientation_horizontal=True): if df is None: df = df_train_test else: df = df temp = df[feature].value_counts() df1 = pd.DataFrame({feature: 
temp.index, "Number of contracts": temp.values}) # Calculate the percentage of target=1 per category value cat_perc = df[[feature, "TARGET"]].groupby([feature], as_index=False).mean() cat_perc.sort_values(by="TARGET", ascending=False, inplace=True) sns.set_color_codes("colorblind") if orientation_horizontal == True: plt.figure(figsize=(15, 5)) plt.subplot(121) s1 = sns.barplot(y=feature, x="Number of contracts", data=df1) plt.subplot(122) s2 = sns.barplot(y=feature, x="TARGET", data=cat_perc) plt.xlabel("Fraction of loans defaulted", fontsize=12) plt.ylabel(feature, fontsize=12) else: plt.figure(figsize=(10, 12)) plt.subplot(211) s1 = sns.barplot(x=feature, y="Number of contracts", data=df1) s1.set_xticklabels(s1.get_xticklabels(), rotation=90) plt.subplot(212) s2 = sns.barplot(x=feature, y="TARGET", data=cat_perc) s2.set_xticklabels(s2.get_xticklabels(), rotation=90) plt.ylabel("Fraction of loans defaulted", fontsize=12) plt.xlabel(feature, fontsize=12) plt.tick_params(axis="both", which="major", labelsize=12) plt.subplots_adjust(wspace=0.6) plt.show() # Concatenate the two lists features = categorical_obj + non_categorical_obj # Loop over the features and plot each one for feature in features: plot_categorical_feature(feature) plot_categorical_feature("ORGANIZATION_TYPE", None, False) # **plot_bivariate_distribution** def plot_bivariate_distribution(feature, df): if df is None: df = df_train else: df = df plt.figure(figsize=(10, 4)) sns.kdeplot(df.loc[df["TARGET"] == 0, feature], label="TARGET == 0") sns.kdeplot(df.loc[df["TARGET"] == 1, feature], label="TARGET == 1") plt.xlabel(feature) plt.ylabel("Density") plt.title("Distribution of {} by Target Value".format(feature)) plt.legend() plot_bivariate_distribution("EXT_SOURCE_1", df_train) plot_bivariate_distribution("EXT_SOURCE_2", df_train) plot_bivariate_distribution("AMT_INCOME_TOTAL", df_train) plot_bivariate_distribution("DAYS_EMPLOYED", df_train) plot_bivariate_distribution("DAYS_BIRTH", df_train) # 1. 
Feature engineering # Converting days col into a year def create_day_to_year(df, ls_cols): for col in ls_cols: if col.startswith("DAYS") and np.issubdtype(df[col].dtype, np.number): new_col = col.replace("DAYS", "YEARS_UP") df[new_col] = round(np.abs(df[col] / 365)) df.drop(columns=[col], inplace=True) return df dfs = [ (df_train_test, "df_train_test"), (base_bureau_up, "base_bureau_up"), (base_previous_application_up, "base_previous_application_up"), (base_POS_CASH_balance_up, "base_POS_CASH_balance_up"), (base_bureau_balance_up, "base_bureau_balance_up"), (base_credit_card_balance_up, "base_credit_card_balance_up"), (base_installments_payments_up, "base_installments_payments_up"), ] for df, name in dfs: day_cols = df.filter(like="DAYS").columns df = create_day_to_year(df, day_cols) def print_years_cols(dfs): for df, name in dfs: day_cols = df.filter(like="YEARS_UP").columns print(f"Number of columns with 'YEARS' in {name}: {len(day_cols)}") print(f"Columns with 'YEARS' in {name}:") for col in day_cols: print(col) print_years_cols([(df_train_test, "df_train")]) plot_bivariate_distribution("YEARS_UP_BIRTH", df_train_test) # Converting income band; def create_income_band(df): bins = [ 0, 30000, 65000, 95000, 130000, 160000, 190000, 220000, 275000, 325000, np.inf, ] labels = range(1, len(bins)) df["INCOME_BAND"] = pd.cut(df["AMT_INCOME_TOTAL"], bins=bins, labels=labels) return df dfs = [(df_train_test, "df_train_test")] for df, name in dfs: df = create_income_band(df) fig, ax = plt.subplots(figsize=(5, 3)) sns.countplot(data=df_train_test, x="INCOME_BAND", hue="TARGET") ax.set_title("Income data for people repaying and defaulting loans") ax.set_ylabel("Count") plt.tight_layout() plt.show() # # Encoding & Missing value handeling def label_encoding(df): for col in df.columns: if df[col].dtype == "object": if len(df[col].unique()) == 2: le = LabelEncoder() le.fit(df[col]) df[col] = le.transform(df[col]) # else: # df[col] = pd.get_dummies(df[col]) return df def missing_values_table(df): # Check if input is a dataframe or a series if isinstance(df, pd.Series): df = pd.DataFrame(df) # Get columns with missing values na_columns = df.columns[df.isnull().any()].tolist() # Count missing values and calculate ratio n_miss = df[na_columns].isnull().sum().sort_values(ascending=False) ratio = (n_miss / df.shape[0] * 100).sort_values(ascending=False) # Create DataFrame with missing values and ratio missing_df = pd.concat( [n_miss, np.round(ratio, 2)], axis=1, keys=["Missing Values", "Percentage"] ) return missing_df def missing_preprocess_data(df): # Identify columns with more than 60% missing values missing_cols = df.columns[df.isnull().mean() > 0.6] # Drop columns with more than 60% missing values num_cols_dropped = len(missing_cols) df.drop(columns=missing_cols, inplace=True) print(f"Dropped {num_cols_dropped} columns due to missing value threshold") # Fill remaining missing values using median imputation imputer = SimpleImputer(strategy="median") df = pd.DataFrame(imputer.fit_transform(df), columns=df.columns) # Print out the original and preprocessed datasets print("Preprocessed dataset:") print(df) return df base_train_test_up = df_train_test.copy() label_encoding(base_train_test_up) base_train_test_up = pd.get_dummies(base_train_test_up) base_train_test_up.shape preprocessed_data = missing_preprocess_data(base_train_test_up) missing_values_table(preprocessed_data) # # Basic model alalysis # Basic Logistic regrassion # Split the data into training and testing sets train_X, test_X, train_Y, test_Y = 
train_test_split( preprocessed_data.drop(["SK_ID_CURR", "TARGET"], axis=1), preprocessed_data["TARGET"], test_size=0.25, random_state=123, ) # train_X contains the independent variables for the training set # test_X contains the independent variables for the testing set # train_Y contains the dependent variable (TARGET) for the training set # test_Y contains the dependent variable (TARGET) for the testing set def train_predict_visualize(train_X, train_Y, test_X, test_Y): # Train a logistic regression classifier clf = LogisticRegression(random_state=123) clf.fit(train_X, train_Y) # Predict using test data pred_Y = clf.predict(test_X) pred_prob_Y = clf.predict_proba(test_X)[:, 1] # Calculate metrics auc = roc_auc_score(test_Y, pred_prob_Y) tn, fp, fn, tp = confusion_matrix(test_Y, pred_Y).ravel() accuracy = (tp + tn) / (tp + tn + fp + fn) precision = tp / (tp + fp) recall = tp / (tp + fn) f1_score = 2 * precision * recall / (precision + recall) # Print metrics print("AUC:", auc) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("F1 Score:", f1_score) # Visualize feature importances coef = pd.Series(clf.coef_[0], index=train_X.columns) coef.nlargest(10).plot(kind="barh") plt.show() def train_predict_visualize(train_X, train_Y, test_X, test_Y): # Train a logistic regression classifier clf = LogisticRegression(random_state=123) clf.fit(train_X, train_Y) # Predict using test data pred_Y = clf.predict(test_X) pred_prob_Y = clf.predict_proba(test_X)[:, 1] # Calculate metrics auc = roc_auc_score(test_Y, pred_prob_Y) tn, fp, fn, tp = confusion_matrix(test_Y, pred_Y).ravel() accuracy = (tp + tn) / (tp + tn + fp + fn) precision = tp / (tp + fp) recall = tp / (tp + fn) f1_score = 2 * precision * recall / (precision + recall) # Print metrics print("AUC:", auc) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("F1 Score:", f1_score) # Visualize feature importances coef = pd.Series(clf.coef_[0], index=train_X.columns) top_features = coef.nlargest(10) top_features.plot(kind="barh") plt.show() # Print table of top 10 features print("Top 10 Features:") print(top_features.to_string()) train_predict_visualize(train_X, train_Y, test_X, test_Y) label_encoding(base_bureau_up) base_bureau_up = pd.get_dummies(base_bureau_up) df_bureau_up = missing_preprocess_data(base_bureau_up) missing_values_table(df_bureau_up) label_encoding(base_previous_application_up) base_previous_application_up = pd.get_dummies(base_previous_application_up) df_previous_application_up = missing_preprocess_data(base_previous_application_up) missing_values_table(df_previous_application_up) label_encoding(base_POS_CASH_balance_up) base_POS_CASH_balance_up = pd.get_dummies(base_POS_CASH_balance_up) df_POS_CASH_balance_up = missing_preprocess_data(base_POS_CASH_balance_up) missing_values_table(df_POS_CASH_balance_up) # # Domain Knowledge feature engeening # Domain knowledge # Debt-to-income-ratio preprocessed_data["CREDIT_INCOME_PERCENT"] = ( preprocessed_data["AMT_CREDIT"] / preprocessed_data["AMT_INCOME_TOTAL"] ) # Loan-to-Value-ration preprocessed_data["LOAN_TO_VALUE_RATION"] = ( preprocessed_data["AMT_CREDIT"] / preprocessed_data["AMT_GOODS_PRICE"] ) # percentage of the applicant's income that is being used to pay off the loan. preprocessed_data["ANNUITY_INCOME_PERCENT"] = ( preprocessed_data["AMT_ANNUITY"] / preprocessed_data["AMT_INCOME_TOTAL"] ) # This ratio represents the length of time it will take the applicant to pay off the loan. 
preprocessed_data["CREDIT_TERM"] = ( preprocessed_data["AMT_ANNUITY"] / preprocessed_data["AMT_CREDIT"] ) # Employment history percentage preprocessed_data["YEARS_EMPLOYED_PERCENT"] = ( preprocessed_data["YEARS_UP_EMPLOYED"] / preprocessed_data["YEARS_UP_BIRTH"] ) # age # -------- # ### Connecting the data aet # Merge preprocessed_data and df_bureau_up df1 = pd.merge(preprocessed_data, df_bureau_up, on="SK_ID_CURR", how="left") # # # Merge df1 and df_previous_application_up # df2 = pd.merge(df1, df_previous_application_up, on='SK_ID_CURR', how='left') # # # Merge df2 and df_bureau_balance_up # df3 = pd.merge(df2, base_bureau_balance_up, on='SK_ID_BUREAU', how='left') # # # # Merge df3 and df_POS_CASH_balance_up # df4 = pd.merge(df3, base_POS_CASH_balance_up, on='SK_ID_CURR', how='left') # # # # Merge df4 and df_installments_payments_up # df5 = pd.merge(df4, base_installments_payments_up, on='SK_ID_CURR', how='left') # # # Merge df5 and df_credit_card_balance_up # df = pd.merge(df1, base_credit_card_balance_up, on='SK_ID_CURR', how='left') # Get all column names of the dataframe col_names = df1.columns.tolist() # Print the column names print(col_names) # ### Poly feature engineering # Define the columns to include in the polynomial features poly_cols = [ "AMT_INCOME_TOTAL", "AMT_CREDIT", "AMT_ANNUITY", "YEARS_UP_BIRTH", "YEARS_UP_EMPLOYED", ] # Check if all columns exist in the dataframe if all(col in df1.columns for col in poly_cols): # Create polynomial features poly = PolynomialFeatures(degree=2, include_bias=False) poly_features = poly.fit_transform(df1[poly_cols]) poly_feature_names = poly.get_feature_names_out(poly_cols) poly_df = pd.DataFrame(poly_features, columns=poly_feature_names) # Merge the polynomial features with the original dataframe df1 = pd.concat([df1, poly_df], axis=1) else: print("One or more columns not found in dataframe") # Check for duplicated column names duplicated_cols = df1.columns[df1.columns.duplicated()] # Print the duplicated column names print("Duplicated columns:", duplicated_cols) # Remove duplicated columns if any if len(duplicated_cols) > 0: df1 = df1.loc[:, ~df1.columns.duplicated()] print("Duplicated columns removed") # Split the data into training and testing sets X_train, X_test, y_train, y_test = train_test_split( df1.drop(["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"], axis=1), df1["TARGET"], test_size=0.2, random_state=42, ) # Train an XGBoost classifier xgb = XGBClassifier(n_estimators=100, random_state=42) xgb.fit(X_train, y_train) # Get the feature importances importances = xgb.feature_importances_ # Create a dataframe with the feature importances feature_importances = pd.DataFrame( {"feature": X_train.columns, "importance": importances} ) # Sort the dataframe by importance feature_importances = feature_importances.sort_values("importance", ascending=False) # Select the top 50 features top_features = feature_importances.head(50) # Create a new dataframe with only the top features df_top = df1[ ["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"] + top_features["feature"].tolist() ] # Save the new dataframe to a CSV file df_top.to_csv("data_top.csv", index=False) # Plot the feature importances plt.figure(figsize=(16, 32)) plt.barh(top_features["feature"], top_features["importance"]) plt.xlabel("Importance") plt.ylabel("Feature") plt.title("Feature Importances") plt.show() import xgboost as xgb # Split the data into training and testing sets X_train, X_test, y_train, y_test = train_test_split( df_top.drop(["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"], axis=1), 
df_top["TARGET"], test_size=0.2, random_state=42, ) # Train an XGBClassifier xgb_model = xgb.XGBClassifier(n_estimators=100, random_state=42) xgb_model.fit(X_train, y_train) # Make predictions on the test set y_pred = xgb_model.predict(X_test) # Evaluate the performance of the model accuracy = accuracy_score(y_test, y_pred) precision = precision_score(y_test, y_pred) recall = recall_score(y_test, y_pred) f1 = f1_score(y_test, y_pred) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1) df_3 = df1.copy() # Remove any non-alphanumeric characters from the feature names df_3.columns = [re.sub("[^0-9a-zA-Z]+", "_", col) for col in df_3.columns] # Split the data into training and testing sets X_train_2, X_test_2, y_train_2, y_test_2 = train_test_split( df_3.drop(["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"], axis=1), df_3["TARGET"], test_size=0.2, random_state=42, ) # Train a LightGBM classifier lgbm_2 = lgb.LGBMClassifier(n_estimators=100, random_state=42) lgbm_2.fit(X_train_2, y_train_2) # Get the feature importances importances_2 = lgbm_2.feature_importances_ # Create a dataframe with the feature importances feature_importances_2 = pd.DataFrame( {"feature": X_train_2.columns, "importance": importances_2} ) # Sort the dataframe by importance feature_importances_2 = feature_importances_2.sort_values("importance", ascending=False) # Select the top 50 features top_features_2 = feature_importances_2.head(50) # Create a new dataframe with only the top features df_top_2 = df_3[ ["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"] + top_features_2["feature"].tolist() ] # Save the new dataframe to a CSV file df_top_2.to_csv("data_top_2.csv", index=False) # Plot the feature importances plt.figure(figsize=(16, 32)) plt.barh(top_features_2["feature"], top_features_2["importance"]) plt.xlabel("Importance") plt.ylabel("Feature") plt.title("Feature Importances") plt.show() import lightgbm as lgb # Remove any non-alphanumeric characters from the feature names df_top_2.columns = [re.sub("[^0-9a-zA-Z]+", "_", col) for col in df_top_2.columns] # Split the data into training and testing sets X_train, X_test, y_train, y_test = train_test_split( df_top_2.drop(["SK_ID_CURR", "SK_ID_BUREAU", "TARGET"], axis=1), df_top_2["TARGET"], test_size=0.2, random_state=42, ) # Train a LightGBM classifier lgbm = lgb.LGBMClassifier(n_estimators=100, random_state=42) lgbm.fit(X_train, y_train) # Make predictions on the test set y_pred = lgbm.predict(X_test) # Evaluate the performance of the model accuracy_lgbm = accuracy_score(y_test, y_pred) precision_lgbm = precision_score(y_test, y_pred) recall_lgbm = recall_score(y_test, y_pred) f1_lgbm = f1_score(y_test, y_pred) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1) import matplotlib.pyplot as plt import numpy as np # Define the evaluation metrics metrics = ["Accuracy", "Precision", "Recall", "F1 score"] xgb_scores = [accuracy, precision, recall, f1] lgbm_scores = [accuracy_lgbm, precision_lgbm, recall_lgbm, f1_lgbm] # Create a bar plot x = np.arange(len(metrics)) width = 0.35 fig, ax = plt.subplots() rects1 = ax.bar(x - width / 2, xgb_scores, width, label="XGBClassifier") rects2 = ax.bar(x + width / 2, lgbm_scores, width, label="LGBMClassifier") # Add labels and title ax.set_ylabel("Score") ax.set_xticks(x) ax.set_xticklabels(metrics) ax.legend() ax.set_title("Comparison of XGBClassifier and LGBMClassifier") # Add values above the bars def autolabel(rects): for rect in rects: height = 
rect.get_height() ax.annotate( "{:.3f}".format(height), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3), # 3 points vertical offset textcoords="offset points", ha="center", va="bottom", ) autolabel(rects1) autolabel(rects2) plt.show() from sklearn.model_selection import KFold from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score # Define the number of folds k = 5 # Define the K-fold cross-validator kf = KFold(n_splits=k, shuffle=True, random_state=42) # Initialize empty lists to store the scores for each fold accuracy_scores_xgb = [] precision_scores_xgb = [] recall_scores_xgb = [] f1_scores_xgb = [] # Loop over the folds for train_index, test_index in kf.split(df_top): # Split the data into training and test sets X_train, X_test = ( df_top.drop(["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"], axis=1).iloc[train_index], df_top.drop(["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"], axis=1).iloc[test_index], ) y_train, y_test = ( df_top["TARGET"].iloc[train_index], df_top["TARGET"].iloc[test_index], ) # Initialize the XGBClassifier model model = xgb.XGBClassifier(n_estimators=100, random_state=42) # Fit the model on the training data model.fit(X_train, y_train) # Make predictions on the test data y_pred = model.predict(X_test) # Calculate the evaluation metrics accuracy_xgb = accuracy_score(y_test, y_pred) precision_xgb = precision_score(y_test, y_pred) recall_xgb = recall_score(y_test, y_pred) f1_xgb = f1_score(y_test, y_pred) # Append the scores to the lists accuracy_scores_xgb.append(accuracy_xgb) precision_scores_xgb.append(precision_xgb) recall_scores_xgb.append(recall_xgb) f1_scores_xgb.append(f1_xgb) # Calculate the mean and standard deviation of the scores mean_accuracy = np.mean(accuracy_scores_xgb) std_accuracy = np.std(accuracy_scores_xgb) mean_precision = np.mean(precision_scores_xgb) std_precision = np.std(precision_scores_xgb) mean_recall = np.mean(recall_scores_xgb) std_recall = np.std(recall_scores_xgb) mean_f1 = np.mean(f1_scores_xgb) std_f1 = np.std(f1_scores_xgb) # Print the results print( "Accuracy_xgb: {:.2f}% (+/- {:.2f}%)".format( mean_accuracy * 100, std_accuracy * 100 ) ) print( "Precision_xgb: {:.2f}% (+/- {:.2f}%)".format( mean_precision * 100, std_precision * 100 ) ) print("Recall_xgb: {:.2f}% (+/- {:.2f}%)".format(mean_recall * 100, std_recall * 100)) print("F1 score_xgb: {:.2f}% (+/- {:.2f}%)".format(mean_f1 * 100, std_f1 * 100))
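# Note: the sketch below is an add-on, not part of the original notebook. The plain KFold loop above scores the imbalanced TARGET with accuracy-style metrics, so fold-to-fold numbers can move for reasons unrelated to the model. One possible follow-up is a stratified split scored with ROC-AUC (already used for the logistic-regression baseline earlier in this notebook). It reuses df_top and the same XGBoost settings; the fold count and the helper names (X_all, y_all, skf, ...) are choices made only for this illustration.
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score

X_all = df_top.drop(["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"], axis=1)
y_all = df_top["TARGET"]

skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
auc_scores = []
for fold_train_idx, fold_test_idx in skf.split(X_all, y_all):
    fold_model = xgb.XGBClassifier(n_estimators=100, random_state=42)
    fold_model.fit(X_all.iloc[fold_train_idx], y_all.iloc[fold_train_idx])
    # ROC-AUC needs the predicted probability of the positive class, not hard labels
    fold_proba = fold_model.predict_proba(X_all.iloc[fold_test_idx])[:, 1]
    auc_scores.append(roc_auc_score(y_all.iloc[fold_test_idx], fold_proba))

print(
    "ROC-AUC (stratified 5-fold): {:.4f} (+/- {:.4f})".format(
        np.mean(auc_scores), np.std(auc_scores)
    )
)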
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/011/129011073.ipynb
null
null
[{"Id": 129011073, "ScriptId": 38342411, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11858828, "CreationDate": "05/10/2023 09:46:36", "VersionNumber": 1.0, "Title": "notebookb1d743dbed", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 831.0, "LinesInsertedFromPrevious": 831.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import gc import time from contextlib import contextmanager import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import seaborn as sns import warnings warnings.simplefilter(action="ignore", category=FutureWarning) import re import warnings warnings.filterwarnings("ignore") from sklearn.preprocessing import LabelEncoder, MinMaxScaler, OneHotEncoder from sklearn.metrics import roc_auc_score, roc_curve from sklearn.model_selection import KFold, StratifiedKFold from sklearn.impute import SimpleImputer from lightgbm import LGBMClassifier # basic Logistic regression - on traning set from sklearn.model_selection import train_test_split import featuretools as ft import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import roc_auc_score, confusion_matrix from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import PolynomialFeatures # Import required libraries/packages import warnings warnings.filterwarnings("ignore") import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from timeit import default_timer as timer import os import random import csv import json import itertools import pprint # from pydash import at import gc import re # import featuretools for automated feature engineering import featuretools as ft from featuretools import selection # Import sklearn helper metrics and transformations from sklearn.base import TransformerMixin from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split, KFold from sklearn.utils import resample from sklearn.metrics import ( confusion_matrix, accuracy_score, precision_score, recall_score, roc_auc_score, classification_report, roc_curve, auc, f1_score, ) # Import models from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.naive_bayes import GaussianNB from xgboost import XGBClassifier from sklearn.ensemble import GradientBoostingClassifier import lightgbm as lgb # import library for hyperparameter optimization from hyperopt import STATUS_OK from hyperopt import hp, tpe, Trials, fmin from hyperopt.pyll.stochastic import sample import os def reduce_mem_usage(df): numerics = ["int16", "int32", "int64", "float16", "float32", "float64"] for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == "int": if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif 
c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if ( c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max ): df[col] = df[col].astype(np.float16) elif ( c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max ): df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) return df def get_balance_data(): default_dir = "../input/home-credit-default-risk/" pos_dtype = { "SK_ID_PREV": np.uint32, "SK_ID_CURR": np.uint32, "MONTHS_BALANCE": np.int32, "SK_DPD": np.int32, "SK_DPD_DEF": np.int32, "CNT_INSTALMENT": np.float32, "CNT_INSTALMENT_FUTURE": np.float32, } install_dtype = { "SK_ID_PREV": np.uint32, "SK_ID_CURR": np.uint32, "NUM_INSTALMENT_NUMBER": np.int32, "NUM_INSTALMENT_VERSION": np.float32, "DAYS_INSTALMENT": np.float32, "DAYS_ENTRY_PAYMENT": np.float32, "AMT_INSTALMENT": np.float32, "AMT_PAYMENT": np.float32, } card_dtype = { "SK_ID_PREV": np.uint32, "SK_ID_CURR": np.uint32, "MONTHS_BALANCE": np.int16, "AMT_CREDIT_LIMIT_ACTUAL": np.int32, "CNT_DRAWINGS_CURRENT": np.int32, "SK_DPD": np.int32, "SK_DPD_DEF": np.int32, "AMT_BALANCE": np.float32, "AMT_DRAWINGS_ATM_CURRENT": np.float32, "AMT_DRAWINGS_CURRENT": np.float32, "AMT_DRAWINGS_OTHER_CURRENT": np.float32, "AMT_DRAWINGS_POS_CURRENT": np.float32, "AMT_INST_MIN_REGULARITY": np.float32, "AMT_PAYMENT_CURRENT": np.float32, "AMT_PAYMENT_TOTAL_CURRENT": np.float32, "AMT_RECEIVABLE_PRINCIPAL": np.float32, "AMT_RECIVABLE": np.float32, "AMT_TOTAL_RECEIVABLE": np.float32, "CNT_DRAWINGS_ATM_CURRENT": np.float32, "CNT_DRAWINGS_OTHER_CURRENT": np.float32, "CNT_DRAWINGS_POS_CURRENT": np.float32, "CNT_INSTALMENT_MATURE_CUM": np.float32, } pos_bal = pd.read_csv( os.path.join(default_dir, "POS_CASH_balance.csv"), dtype=pos_dtype ) install = pd.read_csv( os.path.join(default_dir, "installments_payments.csv"), dtype=install_dtype ) card_bal = pd.read_csv( os.path.join(default_dir, "credit_card_balance.csv"), dtype=card_dtype ) return pos_bal, install, card_bal def get_dataset(): default_dir = "../input/home-credit-default-risk/" app_train = pd.read_csv(os.path.join(default_dir, "application_train.csv")) # app_train = reduce_mem_usage(app_train) app_test = pd.read_csv(os.path.join(default_dir, "application_test.csv")) # app_test = reduce_mem_usage(app_test) apps = pd.concat([app_train, app_test]) prev = pd.read_csv(os.path.join(default_dir, "previous_application.csv")) prev = reduce_mem_usage(prev) bureau = pd.read_csv(os.path.join(default_dir, "bureau.csv")) bureau = reduce_mem_usage(bureau) bureau_bal = pd.read_csv(os.path.join(default_dir, "bureau_balance.csv")) bureau_bal = reduce_mem_usage(bureau_bal) pos_bal, install, card_bal = get_balance_data() return app_train, apps, prev, bureau, bureau_bal, pos_bal, install, card_bal app_train, apps, prev, bureau, bureau_bal, pos_bal, install, card_bal = get_dataset() df_train = app_train.copy() df_train_test = apps.copy() base_bureau_up = bureau.copy() base_previous_application_up = prev.copy() base_POS_CASH_balance_up = pos_bal.copy() base_bureau_balance_up = bureau_bal.copy() base_credit_card_balance_up = card_bal.copy() base_installments_payments_up = install.copy() ## function for checking the data shape def check_df(dfs): for i, (df, name) in enumerate(dfs): print(f"{name}: {df.shape}") dfs = [ (df_train_test, "df_train_test"), (base_bureau_up, "base_bureau_up"), (base_previous_application_up, "base_previous_application_up"), (base_POS_CASH_balance_up, "base_POS_CASH_balance_up"), 
(base_bureau_balance_up, "base_bureau_balance_up"), (base_credit_card_balance_up, "base_credit_card_balance_up"), (base_installments_payments_up, "base_installments_payments_up"), ] check_df(dfs) dfs = [ (df_train_test, "df_train_test"), (base_bureau_up, "base_bureau_up"), (base_previous_application_up, "base_previous_application_up"), (base_POS_CASH_balance_up, "base_POS_CASH_balance_up"), (base_bureau_balance_up, "base_bureau_balance_up"), (base_credit_card_balance_up, "base_credit_card_balance_up"), (base_installments_payments_up, "base_installments_payments_up"), ] def check_head(dfs): for df, name in dfs: print(name) display(df.head(3)) check_head(dfs) # **2. Exploratory data analysis** print(df_train["TARGET"].nunique()) def missing_values_table(df): if isinstance(df, pd.Series): df = pd.DataFrame(df) missing_values_dict = {} na_columns = df.columns[df.isnull().any()].tolist() n_miss = df[na_columns].isnull().sum().sort_values(ascending=False) ratio = (n_miss / df.shape[0] * 100).sort_values(ascending=False) missing_df = pd.concat( [n_miss, np.round(ratio, 2)], axis=1, keys=["Missing Values", "Percentage"] ) if isinstance(df, pd.Series): missing_values_dict[df.name] = missing_df else: missing_values_dict["DataFrame"] = missing_df return missing_values_dict missing_values_table(df_train_test["TARGET"]) df_train["TARGET"].astype(int).plot.hist() # Find correlations with the target and sort numeric_cols = df_train_test.select_dtypes(include=[np.number]).columns correlations = df_train_test[numeric_cols].corr()["TARGET"].sort_values() # Display correlations print("Most Positive Correlations:\n", correlations.tail(15)) print("\nMost Negative Correlations:\n", correlations.head(15)) # # Identify the categorical variable def identify_columns(df, threshold=25): # Identify categorical object columns, categorical numerical columns, and non-categorical columns cat_object_list = [ i for i in df.columns if df[i].dtype == "object" and df[i].nunique() < threshold ] cat_num_list = [ i for i in df.columns if df[i].dtype in ["int64", "float64"] and df[i].nunique() < threshold ] non_cat_list = [ i for i in df.columns if i not in cat_object_list and i not in cat_num_list ] # Identify object columns and numerical columns in non-categorical columns mix_serise_col = df[non_cat_list] non_cat_obj = [ i for i in mix_serise_col.columns if mix_serise_col[i].dtype == "object" ] non_cat_num = [ i for i in mix_serise_col.columns if mix_serise_col[i].dtype in ["int64", "float64"] ] # #Print the results # print('Categorical object columns:', len(cat_object_list)) # print('Categorical numerical columns:', len(cat_num_list)) # print('Non-categorical columns:', len(non_cat_list)) # print('Object columns in non-categorical columns:', len(non_cat_obj)) # print('Numerical columns in non-categorical columns:', len(non_cat_num)) # Return the results as a dictionary results = { "cat_object_list": cat_object_list, "cat_num_list": cat_num_list, "non_cat_list": non_cat_list, "non_cat_obj": non_cat_obj, "non_cat_num": non_cat_num, } return results results = identify_columns(df_train_test) categorical_num = identify_columns(df_train_test)["cat_num_list"] categorical_obj = identify_columns(df_train_test)["cat_object_list"] non_categorical_obj = identify_columns(df_train_test)["non_cat_obj"] print(categorical_obj) print(categorical_num) def plot_categorical_feature(feature, df=None, orientation_horizontal=True): if df is None: df = df_train_test else: df = df temp = df[feature].value_counts() df1 = pd.DataFrame({feature: 
temp.index, "Number of contracts": temp.values}) # Calculate the percentage of target=1 per category value cat_perc = df[[feature, "TARGET"]].groupby([feature], as_index=False).mean() cat_perc.sort_values(by="TARGET", ascending=False, inplace=True) sns.set_color_codes("colorblind") if orientation_horizontal == True: plt.figure(figsize=(15, 5)) plt.subplot(121) s1 = sns.barplot(y=feature, x="Number of contracts", data=df1) plt.subplot(122) s2 = sns.barplot(y=feature, x="TARGET", data=cat_perc) plt.xlabel("Fraction of loans defaulted", fontsize=12) plt.ylabel(feature, fontsize=12) else: plt.figure(figsize=(10, 12)) plt.subplot(211) s1 = sns.barplot(x=feature, y="Number of contracts", data=df1) s1.set_xticklabels(s1.get_xticklabels(), rotation=90) plt.subplot(212) s2 = sns.barplot(x=feature, y="TARGET", data=cat_perc) s2.set_xticklabels(s2.get_xticklabels(), rotation=90) plt.ylabel("Fraction of loans defaulted", fontsize=12) plt.xlabel(feature, fontsize=12) plt.tick_params(axis="both", which="major", labelsize=12) plt.subplots_adjust(wspace=0.6) plt.show() # Concatenate the two lists features = categorical_obj + non_categorical_obj # Loop over the features and plot each one for feature in features: plot_categorical_feature(feature) plot_categorical_feature("ORGANIZATION_TYPE", None, False) # **plot_bivariate_distribution** def plot_bivariate_distribution(feature, df): if df is None: df = df_train else: df = df plt.figure(figsize=(10, 4)) sns.kdeplot(df.loc[df["TARGET"] == 0, feature], label="TARGET == 0") sns.kdeplot(df.loc[df["TARGET"] == 1, feature], label="TARGET == 1") plt.xlabel(feature) plt.ylabel("Density") plt.title("Distribution of {} by Target Value".format(feature)) plt.legend() plot_bivariate_distribution("EXT_SOURCE_1", df_train) plot_bivariate_distribution("EXT_SOURCE_2", df_train) plot_bivariate_distribution("AMT_INCOME_TOTAL", df_train) plot_bivariate_distribution("DAYS_EMPLOYED", df_train) plot_bivariate_distribution("DAYS_BIRTH", df_train) # 1. 
Feature engineering # Converting days col into a year def create_day_to_year(df, ls_cols): for col in ls_cols: if col.startswith("DAYS") and np.issubdtype(df[col].dtype, np.number): new_col = col.replace("DAYS", "YEARS_UP") df[new_col] = round(np.abs(df[col] / 365)) df.drop(columns=[col], inplace=True) return df dfs = [ (df_train_test, "df_train_test"), (base_bureau_up, "base_bureau_up"), (base_previous_application_up, "base_previous_application_up"), (base_POS_CASH_balance_up, "base_POS_CASH_balance_up"), (base_bureau_balance_up, "base_bureau_balance_up"), (base_credit_card_balance_up, "base_credit_card_balance_up"), (base_installments_payments_up, "base_installments_payments_up"), ] for df, name in dfs: day_cols = df.filter(like="DAYS").columns df = create_day_to_year(df, day_cols) def print_years_cols(dfs): for df, name in dfs: day_cols = df.filter(like="YEARS_UP").columns print(f"Number of columns with 'YEARS' in {name}: {len(day_cols)}") print(f"Columns with 'YEARS' in {name}:") for col in day_cols: print(col) print_years_cols([(df_train_test, "df_train")]) plot_bivariate_distribution("YEARS_UP_BIRTH", df_train_test) # Converting income band; def create_income_band(df): bins = [ 0, 30000, 65000, 95000, 130000, 160000, 190000, 220000, 275000, 325000, np.inf, ] labels = range(1, len(bins)) df["INCOME_BAND"] = pd.cut(df["AMT_INCOME_TOTAL"], bins=bins, labels=labels) return df dfs = [(df_train_test, "df_train_test")] for df, name in dfs: df = create_income_band(df) fig, ax = plt.subplots(figsize=(5, 3)) sns.countplot(data=df_train_test, x="INCOME_BAND", hue="TARGET") ax.set_title("Income data for people repaying and defaulting loans") ax.set_ylabel("Count") plt.tight_layout() plt.show() # # Encoding & Missing value handeling def label_encoding(df): for col in df.columns: if df[col].dtype == "object": if len(df[col].unique()) == 2: le = LabelEncoder() le.fit(df[col]) df[col] = le.transform(df[col]) # else: # df[col] = pd.get_dummies(df[col]) return df def missing_values_table(df): # Check if input is a dataframe or a series if isinstance(df, pd.Series): df = pd.DataFrame(df) # Get columns with missing values na_columns = df.columns[df.isnull().any()].tolist() # Count missing values and calculate ratio n_miss = df[na_columns].isnull().sum().sort_values(ascending=False) ratio = (n_miss / df.shape[0] * 100).sort_values(ascending=False) # Create DataFrame with missing values and ratio missing_df = pd.concat( [n_miss, np.round(ratio, 2)], axis=1, keys=["Missing Values", "Percentage"] ) return missing_df def missing_preprocess_data(df): # Identify columns with more than 60% missing values missing_cols = df.columns[df.isnull().mean() > 0.6] # Drop columns with more than 60% missing values num_cols_dropped = len(missing_cols) df.drop(columns=missing_cols, inplace=True) print(f"Dropped {num_cols_dropped} columns due to missing value threshold") # Fill remaining missing values using median imputation imputer = SimpleImputer(strategy="median") df = pd.DataFrame(imputer.fit_transform(df), columns=df.columns) # Print out the original and preprocessed datasets print("Preprocessed dataset:") print(df) return df base_train_test_up = df_train_test.copy() label_encoding(base_train_test_up) base_train_test_up = pd.get_dummies(base_train_test_up) base_train_test_up.shape preprocessed_data = missing_preprocess_data(base_train_test_up) missing_values_table(preprocessed_data) # # Basic model alalysis # Basic Logistic regrassion # Split the data into training and testing sets train_X, test_X, train_Y, test_Y = 
train_test_split( preprocessed_data.drop(["SK_ID_CURR", "TARGET"], axis=1), preprocessed_data["TARGET"], test_size=0.25, random_state=123, ) # train_X contains the independent variables for the training set # test_X contains the independent variables for the testing set # train_Y contains the dependent variable (TARGET) for the training set # test_Y contains the dependent variable (TARGET) for the testing set def train_predict_visualize(train_X, train_Y, test_X, test_Y): # Train a logistic regression classifier clf = LogisticRegression(random_state=123) clf.fit(train_X, train_Y) # Predict using test data pred_Y = clf.predict(test_X) pred_prob_Y = clf.predict_proba(test_X)[:, 1] # Calculate metrics auc = roc_auc_score(test_Y, pred_prob_Y) tn, fp, fn, tp = confusion_matrix(test_Y, pred_Y).ravel() accuracy = (tp + tn) / (tp + tn + fp + fn) precision = tp / (tp + fp) recall = tp / (tp + fn) f1_score = 2 * precision * recall / (precision + recall) # Print metrics print("AUC:", auc) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("F1 Score:", f1_score) # Visualize feature importances coef = pd.Series(clf.coef_[0], index=train_X.columns) coef.nlargest(10).plot(kind="barh") plt.show() def train_predict_visualize(train_X, train_Y, test_X, test_Y): # Train a logistic regression classifier clf = LogisticRegression(random_state=123) clf.fit(train_X, train_Y) # Predict using test data pred_Y = clf.predict(test_X) pred_prob_Y = clf.predict_proba(test_X)[:, 1] # Calculate metrics auc = roc_auc_score(test_Y, pred_prob_Y) tn, fp, fn, tp = confusion_matrix(test_Y, pred_Y).ravel() accuracy = (tp + tn) / (tp + tn + fp + fn) precision = tp / (tp + fp) recall = tp / (tp + fn) f1_score = 2 * precision * recall / (precision + recall) # Print metrics print("AUC:", auc) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("F1 Score:", f1_score) # Visualize feature importances coef = pd.Series(clf.coef_[0], index=train_X.columns) top_features = coef.nlargest(10) top_features.plot(kind="barh") plt.show() # Print table of top 10 features print("Top 10 Features:") print(top_features.to_string()) train_predict_visualize(train_X, train_Y, test_X, test_Y) label_encoding(base_bureau_up) base_bureau_up = pd.get_dummies(base_bureau_up) df_bureau_up = missing_preprocess_data(base_bureau_up) missing_values_table(df_bureau_up) label_encoding(base_previous_application_up) base_previous_application_up = pd.get_dummies(base_previous_application_up) df_previous_application_up = missing_preprocess_data(base_previous_application_up) missing_values_table(df_previous_application_up) label_encoding(base_POS_CASH_balance_up) base_POS_CASH_balance_up = pd.get_dummies(base_POS_CASH_balance_up) df_POS_CASH_balance_up = missing_preprocess_data(base_POS_CASH_balance_up) missing_values_table(df_POS_CASH_balance_up) # # Domain Knowledge feature engeening # Domain knowledge # Debt-to-income-ratio preprocessed_data["CREDIT_INCOME_PERCENT"] = ( preprocessed_data["AMT_CREDIT"] / preprocessed_data["AMT_INCOME_TOTAL"] ) # Loan-to-Value-ration preprocessed_data["LOAN_TO_VALUE_RATION"] = ( preprocessed_data["AMT_CREDIT"] / preprocessed_data["AMT_GOODS_PRICE"] ) # percentage of the applicant's income that is being used to pay off the loan. preprocessed_data["ANNUITY_INCOME_PERCENT"] = ( preprocessed_data["AMT_ANNUITY"] / preprocessed_data["AMT_INCOME_TOTAL"] ) # This ratio represents the length of time it will take the applicant to pay off the loan. 
preprocessed_data["CREDIT_TERM"] = ( preprocessed_data["AMT_ANNUITY"] / preprocessed_data["AMT_CREDIT"] ) # Employment history percentage preprocessed_data["YEARS_EMPLOYED_PERCENT"] = ( preprocessed_data["YEARS_UP_EMPLOYED"] / preprocessed_data["YEARS_UP_BIRTH"] ) # age # -------- # ### Connecting the data aet # Merge preprocessed_data and df_bureau_up df1 = pd.merge(preprocessed_data, df_bureau_up, on="SK_ID_CURR", how="left") # # # Merge df1 and df_previous_application_up # df2 = pd.merge(df1, df_previous_application_up, on='SK_ID_CURR', how='left') # # # Merge df2 and df_bureau_balance_up # df3 = pd.merge(df2, base_bureau_balance_up, on='SK_ID_BUREAU', how='left') # # # # Merge df3 and df_POS_CASH_balance_up # df4 = pd.merge(df3, base_POS_CASH_balance_up, on='SK_ID_CURR', how='left') # # # # Merge df4 and df_installments_payments_up # df5 = pd.merge(df4, base_installments_payments_up, on='SK_ID_CURR', how='left') # # # Merge df5 and df_credit_card_balance_up # df = pd.merge(df1, base_credit_card_balance_up, on='SK_ID_CURR', how='left') # Get all column names of the dataframe col_names = df1.columns.tolist() # Print the column names print(col_names) # ### Poly feature engineering # Define the columns to include in the polynomial features poly_cols = [ "AMT_INCOME_TOTAL", "AMT_CREDIT", "AMT_ANNUITY", "YEARS_UP_BIRTH", "YEARS_UP_EMPLOYED", ] # Check if all columns exist in the dataframe if all(col in df1.columns for col in poly_cols): # Create polynomial features poly = PolynomialFeatures(degree=2, include_bias=False) poly_features = poly.fit_transform(df1[poly_cols]) poly_feature_names = poly.get_feature_names_out(poly_cols) poly_df = pd.DataFrame(poly_features, columns=poly_feature_names) # Merge the polynomial features with the original dataframe df1 = pd.concat([df1, poly_df], axis=1) else: print("One or more columns not found in dataframe") # Check for duplicated column names duplicated_cols = df1.columns[df1.columns.duplicated()] # Print the duplicated column names print("Duplicated columns:", duplicated_cols) # Remove duplicated columns if any if len(duplicated_cols) > 0: df1 = df1.loc[:, ~df1.columns.duplicated()] print("Duplicated columns removed") # Split the data into training and testing sets X_train, X_test, y_train, y_test = train_test_split( df1.drop(["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"], axis=1), df1["TARGET"], test_size=0.2, random_state=42, ) # Train an XGBoost classifier xgb = XGBClassifier(n_estimators=100, random_state=42) xgb.fit(X_train, y_train) # Get the feature importances importances = xgb.feature_importances_ # Create a dataframe with the feature importances feature_importances = pd.DataFrame( {"feature": X_train.columns, "importance": importances} ) # Sort the dataframe by importance feature_importances = feature_importances.sort_values("importance", ascending=False) # Select the top 50 features top_features = feature_importances.head(50) # Create a new dataframe with only the top features df_top = df1[ ["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"] + top_features["feature"].tolist() ] # Save the new dataframe to a CSV file df_top.to_csv("data_top.csv", index=False) # Plot the feature importances plt.figure(figsize=(16, 32)) plt.barh(top_features["feature"], top_features["importance"]) plt.xlabel("Importance") plt.ylabel("Feature") plt.title("Feature Importances") plt.show() import xgboost as xgb # Split the data into training and testing sets X_train, X_test, y_train, y_test = train_test_split( df_top.drop(["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"], axis=1), 
df_top["TARGET"], test_size=0.2, random_state=42, ) # Train an XGBClassifier xgb_model = xgb.XGBClassifier(n_estimators=100, random_state=42) xgb_model.fit(X_train, y_train) # Make predictions on the test set y_pred = xgb_model.predict(X_test) # Evaluate the performance of the model accuracy = accuracy_score(y_test, y_pred) precision = precision_score(y_test, y_pred) recall = recall_score(y_test, y_pred) f1 = f1_score(y_test, y_pred) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1) df_3 = df1.copy() # Remove any non-alphanumeric characters from the feature names df_3.columns = [re.sub("[^0-9a-zA-Z]+", "_", col) for col in df_3.columns] # Split the data into training and testing sets X_train_2, X_test_2, y_train_2, y_test_2 = train_test_split( df_3.drop(["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"], axis=1), df_3["TARGET"], test_size=0.2, random_state=42, ) # Train a LightGBM classifier lgbm_2 = lgb.LGBMClassifier(n_estimators=100, random_state=42) lgbm_2.fit(X_train_2, y_train_2) # Get the feature importances importances_2 = lgbm_2.feature_importances_ # Create a dataframe with the feature importances feature_importances_2 = pd.DataFrame( {"feature": X_train_2.columns, "importance": importances_2} ) # Sort the dataframe by importance feature_importances_2 = feature_importances_2.sort_values("importance", ascending=False) # Select the top 50 features top_features_2 = feature_importances_2.head(50) # Create a new dataframe with only the top features df_top_2 = df_3[ ["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"] + top_features_2["feature"].tolist() ] # Save the new dataframe to a CSV file df_top_2.to_csv("data_top_2.csv", index=False) # Plot the feature importances plt.figure(figsize=(16, 32)) plt.barh(top_features_2["feature"], top_features_2["importance"]) plt.xlabel("Importance") plt.ylabel("Feature") plt.title("Feature Importances") plt.show() import lightgbm as lgb # Remove any non-alphanumeric characters from the feature names df_top_2.columns = [re.sub("[^0-9a-zA-Z]+", "_", col) for col in df_top_2.columns] # Split the data into training and testing sets X_train, X_test, y_train, y_test = train_test_split( df_top_2.drop(["SK_ID_CURR", "SK_ID_BUREAU", "TARGET"], axis=1), df_top_2["TARGET"], test_size=0.2, random_state=42, ) # Train a LightGBM classifier lgbm = lgb.LGBMClassifier(n_estimators=100, random_state=42) lgbm.fit(X_train, y_train) # Make predictions on the test set y_pred = lgbm.predict(X_test) # Evaluate the performance of the model accuracy_lgbm = accuracy_score(y_test, y_pred) precision_lgbm = precision_score(y_test, y_pred) recall_lgbm = recall_score(y_test, y_pred) f1_lgbm = f1_score(y_test, y_pred) print("Accuracy:", accuracy) print("Precision:", precision) print("Recall:", recall) print("F1 score:", f1) import matplotlib.pyplot as plt import numpy as np # Define the evaluation metrics metrics = ["Accuracy", "Precision", "Recall", "F1 score"] xgb_scores = [accuracy, precision, recall, f1] lgbm_scores = [accuracy_lgbm, precision_lgbm, recall_lgbm, f1_lgbm] # Create a bar plot x = np.arange(len(metrics)) width = 0.35 fig, ax = plt.subplots() rects1 = ax.bar(x - width / 2, xgb_scores, width, label="XGBClassifier") rects2 = ax.bar(x + width / 2, lgbm_scores, width, label="LGBMClassifier") # Add labels and title ax.set_ylabel("Score") ax.set_xticks(x) ax.set_xticklabels(metrics) ax.legend() ax.set_title("Comparison of XGBClassifier and LGBMClassifier") # Add values above the bars def autolabel(rects): for rect in rects: height = 
rect.get_height() ax.annotate( "{:.3f}".format(height), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3), # 3 points vertical offset textcoords="offset points", ha="center", va="bottom", ) autolabel(rects1) autolabel(rects2) plt.show() from sklearn.model_selection import KFold from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score # Define the number of folds k = 5 # Define the K-fold cross-validator kf = KFold(n_splits=k, shuffle=True, random_state=42) # Initialize empty lists to store the scores for each fold accuracy_scores_xgb = [] precision_scores_xgb = [] recall_scores_xgb = [] f1_scores_xgb = [] # Loop over the folds for train_index, test_index in kf.split(df_top): # Split the data into training and test sets X_train, X_test = ( df_top.drop(["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"], axis=1).iloc[train_index], df_top.drop(["SK_ID_CURR", "TARGET", "SK_ID_BUREAU"], axis=1).iloc[test_index], ) y_train, y_test = ( df_top["TARGET"].iloc[train_index], df_top["TARGET"].iloc[test_index], ) # Initialize the XGBClassifier model model = xgb.XGBClassifier(n_estimators=100, random_state=42) # Fit the model on the training data model.fit(X_train, y_train) # Make predictions on the test data y_pred = model.predict(X_test) # Calculate the evaluation metrics accuracy_xgb = accuracy_score(y_test, y_pred) precision_xgb = precision_score(y_test, y_pred) recall_xgb = recall_score(y_test, y_pred) f1_xgb = f1_score(y_test, y_pred) # Append the scores to the lists accuracy_scores_xgb.append(accuracy_xgb) precision_scores_xgb.append(precision_xgb) recall_scores_xgb.append(recall_xgb) f1_scores_xgb.append(f1_xgb) # Calculate the mean and standard deviation of the scores mean_accuracy = np.mean(accuracy_scores_xgb) std_accuracy = np.std(accuracy_scores_xgb) mean_precision = np.mean(precision_scores_xgb) std_precision = np.std(precision_scores_xgb) mean_recall = np.mean(recall_scores_xgb) std_recall = np.std(recall_scores_xgb) mean_f1 = np.mean(f1_scores_xgb) std_f1 = np.std(f1_scores_xgb) # Print the results print( "Accuracy_xgb: {:.2f}% (+/- {:.2f}%)".format( mean_accuracy * 100, std_accuracy * 100 ) ) print( "Precision_xgb: {:.2f}% (+/- {:.2f}%)".format( mean_precision * 100, std_precision * 100 ) ) print("Recall_xgb: {:.2f}% (+/- {:.2f}%)".format(mean_recall * 100, std_recall * 100)) print("F1 score_xgb: {:.2f}% (+/- {:.2f}%)".format(mean_f1 * 100, std_f1 * 100))
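# Note: hyperopt (hp, tpe, Trials, fmin) is imported at the top of this notebook but never used. The sketch below is not part of the original notebook; it is one possible way to let hyperopt tune a few LightGBM hyperparameters on the sanitised df_top_2 features, scored with ROC-AUC on a hold-out split. The search ranges, the number of evaluations and the helper names (space, objective, X_tr, ...) are arbitrary choices made purely for illustration.
from hyperopt import hp, tpe, fmin, Trials, STATUS_OK

X_tr, X_te, y_tr, y_te = train_test_split(
    df_top_2.drop(["SK_ID_CURR", "SK_ID_BUREAU", "TARGET"], axis=1),
    df_top_2["TARGET"],
    test_size=0.2,
    random_state=42,
)

space = {
    "num_leaves": hp.quniform("num_leaves", 16, 128, 1),
    "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.3)),
    "min_child_samples": hp.quniform("min_child_samples", 10, 100, 1),
}


def objective(params):
    # quniform returns floats, so cast the integer-valued parameters back to int
    model = lgb.LGBMClassifier(
        n_estimators=100,
        num_leaves=int(params["num_leaves"]),
        learning_rate=params["learning_rate"],
        min_child_samples=int(params["min_child_samples"]),
        random_state=42,
    )
    model.fit(X_tr, y_tr)
    auc = roc_auc_score(y_te, model.predict_proba(X_te)[:, 1])
    # hyperopt minimises the objective, so return the negative AUC as the loss
    return {"loss": -auc, "status": STATUS_OK}


best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=10, trials=Trials())
print("Best hyperparameters found:", best)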
false
0
10,506
0
10,506
10,506
129011222
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # reference: # 1. [🚀Spaceship Titanic -📊EDA + 27 different models📈, SANSKAR HASIJA](https://www.kaggle.com/code/odins0n/spaceship-titanic-eda-27-different-models) # Introduction # The spaceship Titanic is an passenger and the spaceship had an accident. Based on given information of passenger, predict which passenger went to other dimension. # Submissions are evaluated based on their classification accuracy, the percentage of predicted labels that are correct. # Imports import numpy as np import pandas as pd ### EDA import seaborn as sns import plotly.express as px import matplotlib.pyplot as plt import plotly.graph_objects as go from plotly.subplots import make_subplots # Data Loading and Preparation train = pd.read_csv("../input/spaceship-titanic/train.csv") test = pd.read_csv("../input/spaceship-titanic/test.csv") submission = pd.read_csv("../input/spaceship-titanic/sample_submission.csv") RANDOM_STATE = 12 FOLDS = 5 STRATEGY = "median" # Columns Descriptions # - `PassengerId` - A unique Id for each passenger. Each Id takes the form gggg_pp where gggg indicates a group the passenger is travelling with and pp is their number within the group. People in a group are often family members, but not always. # - `HomePlanet` - The planet the passenger departed from, typically their planet of permanent residence. # - `CryoSleep` - Indicates whether the passenger elected to be put into suspended animation for the duration of the voyage. Passengers in cryosleep are confined to their cabins. # - `Cabin` - The cabin number where the passenger is staying. Takes the form deck/num/side, where side can be either P for Port or S for Starboard. # - `Destination` - The planet the passenger will be debarking to. # - `Age` - The age of the passenger. # - `VIP` - Whether the passenger has paid for special VIP service during the voyage. # - `RoomService`, FoodCourt, ShoppingMall, Spa, VRDeck - Amount the passenger has billed at each of the Spaceship Titanic's many luxury amenities. # - `Name` - The first and last names of the passenger. # - `Transported` - Whether the passenger was transported to another dimension. This is the target, the column you are trying to predict. 
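# Note: the column descriptions above point out extra structure inside PassengerId (gggg_pp) and Cabin (deck/num/side). The snippet below is not part of the original notebook; it is a small, illustrative sketch of how those strings could be split into separate columns with pandas. It works on a copy, so the dataframes used in the rest of the EDA are left untouched, and the new column names (Group, CabinDeck, ...) are made up for this example.
tmp = train.copy()
# gggg_pp -> travelling group and the passenger's number within that group
tmp[["Group", "NumberInGroup"]] = tmp["PassengerId"].str.split("_", expand=True)
# deck/num/side -> three cabin components (rows with a missing Cabin stay NaN)
tmp[["CabinDeck", "CabinNum", "CabinSide"]] = tmp["Cabin"].str.split("/", expand=True)
tmp[
    ["PassengerId", "Group", "NumberInGroup", "Cabin", "CabinDeck", "CabinNum", "CabinSide"]
].head()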
# Exploring Train Data train.head() print(f"train data shape: {train.shape}") print(f"Number of rows in train data: {train.shape[0]}") print(f"Number of columns in train data: {train.shape[1]}") print(f"Number of values in train data: {train.count().sum()}") print(f"Number of missing values in train data: {sum(train.isna().sum())}") # Non-null values per column train.count() # Missing values per column print(train.isna().sum().sort_values(ascending=False)) # Summary statistics train.describe() # Exploring Test Data test.head() print(f"test data shape: {test.shape}") print(f"Number of rows in test data: {test.shape[0]}") print(f"Number of columns in test data: {test.shape[1]}") print(f"Number of values in test data: {test.count().sum()}") print(f"Number of missing values in test data: {sum(test.isna().sum())}") # Non-null values per column test.count() # Missing values per column test.isna().sum().sort_values(ascending=False) # Summary statistics test.describe() # Exploring Submission Data submission.head() # EDA # Overview of Data # Drop the identifier column, which is not useful as a feature train.drop(columns=["PassengerId"], inplace=True) test.drop(columns=["PassengerId"], inplace=True) # Separate the feature columns from the target column TARGET = "Transported" FEATURES = [] for col in train.columns: if col != TARGET: FEATURES.append(col) print(FEATURES) # Check the summary statistics of the feature columns train.iloc[:, :-1].describe().T.sort_values(by="mean", ascending=False) # sorted by mean value; .T transposes the summary table # Column wise Null Value Distribution test_null = pd.DataFrame(test.isna().sum()) test.isna().sum() test_null test_null = test_null.sort_values(by=0, ascending=False) train_null = pd.DataFrame(train.isna().sum()) train_null train_null = train_null.sort_values(by=0, ascending=False)[:-1] fig = make_subplots( rows=1, cols=2, column_titles=[ "Train Data", "Test Data", ], x_title="Missing Values", ) fig.add_trace( go.Bar( x=train_null[0], y=train_null.index, orientation="h", marker=dict( color=[n for n in range(12)], ), ), 1, 1, ) fig.add_trace( go.Bar( x=test_null[0], y=test_null.index, orientation="h", marker=dict( color=[n for n in range(12)], ), ), 1, 2, ) fig.update_layout( showlegend=False, title_text="Column wise Null Value Distribution", title_x=0.5 ) fig.show() # Continuous and Categorical Data Distribution train.head() train.dtypes df = pd.concat([train[FEATURES], test[FEATURES]], axis=0) text_features = ["Cabin", "Name"] cat_features = [ col for col in FEATURES if df[col].nunique() < 25 and col not in text_features ] # nunique() counts the number of unique values in a column cont_features = [ col for col in FEATURES if df[col].nunique() >= 25 and col not in text_features ] del df # free the temporary combined dataframe print(f"Total number of features: {len(FEATURES)}") print(f"Number of categorical features: {len(cat_features)}") print(f"Number of continuous features: {len(cont_features)}") print(f"Number of text features: {len(text_features)}") labels = ["Categorical", "Continuous", "Text"] values = [len(cat_features), len(cont_features), len(text_features)] fig = go.Figure(data=[go.Pie(labels=labels, values=values, pull=[0.1, 0, 0])]) fig.show() # Correlation matrix fig = px.imshow
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/011/129011222.ipynb
null
null
[{"Id": 129011222, "ScriptId": 38248156, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8499050, "CreationDate": "05/10/2023 09:47:57", "VersionNumber": 1.0, "Title": "Spaceship Titanic - EDA", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 194.0, "LinesInsertedFromPrevious": 194.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # reference: # 1. [🚀Spaceship Titanic -📊EDA + 27 different models📈, SANSKAR HASIJA](https://www.kaggle.com/code/odins0n/spaceship-titanic-eda-27-different-models) # Introduction # The spaceship Titanic is an passenger and the spaceship had an accident. Based on given information of passenger, predict which passenger went to other dimension. # Submissions are evaluated based on their classification accuracy, the percentage of predicted labels that are correct. # Imports import numpy as np import pandas as pd ### EDA import seaborn as sns import plotly.express as px import matplotlib.pyplot as plt import plotly.graph_objects as go from plotly.subplots import make_subplots # Data Loading and Preparation train = pd.read_csv("../input/spaceship-titanic/train.csv") test = pd.read_csv("../input/spaceship-titanic/test.csv") submission = pd.read_csv("../input/spaceship-titanic/sample_submission.csv") RANDOM_STATE = 12 FOLDS = 5 STRATEGY = "median" # Columns Descriptions # - `PassengerId` - A unique Id for each passenger. Each Id takes the form gggg_pp where gggg indicates a group the passenger is travelling with and pp is their number within the group. People in a group are often family members, but not always. # - `HomePlanet` - The planet the passenger departed from, typically their planet of permanent residence. # - `CryoSleep` - Indicates whether the passenger elected to be put into suspended animation for the duration of the voyage. Passengers in cryosleep are confined to their cabins. # - `Cabin` - The cabin number where the passenger is staying. Takes the form deck/num/side, where side can be either P for Port or S for Starboard. # - `Destination` - The planet the passenger will be debarking to. # - `Age` - The age of the passenger. # - `VIP` - Whether the passenger has paid for special VIP service during the voyage. # - `RoomService`, FoodCourt, ShoppingMall, Spa, VRDeck - Amount the passenger has billed at each of the Spaceship Titanic's many luxury amenities. # - `Name` - The first and last names of the passenger. # - `Transported` - Whether the passenger was transported to another dimension. This is the target, the column you are trying to predict. 
# Exploring Train Data train.head() print(f"train data shape: {train.shape}") print(f"Number of rows in train data: {train.shape[0]}") print(f"Number of columns in train data: {train.shape[1]}") print(f"Number of values in train data: {train.count().sum()}") print(f"Number of missing values in train data: {sum(train.isna().sum())}") # Non-null values per column train.count() # Missing values per column print(train.isna().sum().sort_values(ascending=False)) # Summary statistics train.describe() # Exploring Test Data test.head() print(f"test data shape: {test.shape}") print(f"Number of rows in test data: {test.shape[0]}") print(f"Number of columns in test data: {test.shape[1]}") print(f"Number of values in test data: {test.count().sum()}") print(f"Number of missing values in test data: {sum(test.isna().sum())}") # Non-null values per column test.count() # Missing values per column test.isna().sum().sort_values(ascending=False) # Summary statistics test.describe() # Exploring Submission Data submission.head() # EDA # Overview of Data # Drop the identifier column, which is not useful as a feature train.drop(columns=["PassengerId"], inplace=True) test.drop(columns=["PassengerId"], inplace=True) # Separate the feature columns from the target column TARGET = "Transported" FEATURES = [] for col in train.columns: if col != TARGET: FEATURES.append(col) print(FEATURES) # Check the summary statistics of the feature columns train.iloc[:, :-1].describe().T.sort_values(by="mean", ascending=False) # sorted by mean value; .T transposes the summary table # Column wise Null Value Distribution test_null = pd.DataFrame(test.isna().sum()) test.isna().sum() test_null test_null = test_null.sort_values(by=0, ascending=False) train_null = pd.DataFrame(train.isna().sum()) train_null train_null = train_null.sort_values(by=0, ascending=False)[:-1] fig = make_subplots( rows=1, cols=2, column_titles=[ "Train Data", "Test Data", ], x_title="Missing Values", ) fig.add_trace( go.Bar( x=train_null[0], y=train_null.index, orientation="h", marker=dict( color=[n for n in range(12)], ), ), 1, 1, ) fig.add_trace( go.Bar( x=test_null[0], y=test_null.index, orientation="h", marker=dict( color=[n for n in range(12)], ), ), 1, 2, ) fig.update_layout( showlegend=False, title_text="Column wise Null Value Distribution", title_x=0.5 ) fig.show() # Continuous and Categorical Data Distribution train.head() train.dtypes df = pd.concat([train[FEATURES], test[FEATURES]], axis=0) text_features = ["Cabin", "Name"] cat_features = [ col for col in FEATURES if df[col].nunique() < 25 and col not in text_features ] # nunique() counts the number of unique values in a column cont_features = [ col for col in FEATURES if df[col].nunique() >= 25 and col not in text_features ] del df # free the temporary combined dataframe print(f"Total number of features: {len(FEATURES)}") print(f"Number of categorical features: {len(cat_features)}") print(f"Number of continuous features: {len(cont_features)}") print(f"Number of text features: {len(text_features)}") labels = ["Categorical", "Continuous", "Text"] values = [len(cat_features), len(cont_features), len(text_features)] fig = go.Figure(data=[go.Pie(labels=labels, values=values, pull=[0.1, 0, 0])]) fig.show() # Correlation matrix fig = px.imshow
false
0
1,911
1
1,911
1,911
129011803
<jupyter_start><jupyter_text>UrbanSound8K This dataset contains 8732 labeled sound excerpts (&lt;=4s) of urban sounds from 10 classes: `air_conditioner`, `car_horn`, `children_playing`, `dog_bark`, `drilling`, `enginge_idling`, `gun_shot`, `jackhammer`, `siren`, and `street_music`. The classes are drawn from the urban sound taxonomy. For a detailed description of the dataset and how it was compiled please refer to our paper. All excerpts are taken from field recordings uploaded to www.freesound.org. The files are pre-sorted into ten folds (folders named fold1-fold10) to help in the reproduction of and comparison with the automatic classification results reported in the article above. In addition to the sound excerpts, a CSV file containing metadata about each excerpt is also provided. ## AUDIO FILES INCLUDED 8732 audio files of urban sounds (see description above) in WAV format. The sampling rate, bit depth, and number of channels are the same as those of the original file uploaded to Freesound (and hence may vary from file to file). ##META-DATA FILES INCLUDED ``` UrbanSound8k.csv ``` This file contains meta-data information about every audio file in the dataset. This includes: * slice_file_name: The name of the audio file. The name takes the following format: [fsID]-[classID]-[occurrenceID]-[sliceID].wav, where: [fsID] = the Freesound ID of the recording from which this excerpt (slice) is taken [classID] = a numeric identifier of the sound class (see description of classID below for further details) [occurrenceID] = a numeric identifier to distinguish different occurrences of the sound within the original recording [sliceID] = a numeric identifier to distinguish different slices taken from the same occurrence * fsID: The Freesound ID of the recording from which this excerpt (slice) is taken * start The start time of the slice in the original Freesound recording * end: The end time of slice in the original Freesound recording * salience: A (subjective) salience rating of the sound. 1 = foreground, 2 = background. * fold: The fold number (1-10) to which this file has been allocated. * classID: A numeric identifier of the sound class: 0 = air_conditioner 1 = car_horn 2 = children_playing 3 = dog_bark 4 = drilling 5 = engine_idling 6 = gun_shot 7 = jackhammer 8 = siren 9 = street_music * class: The class name: air_conditioner, car_horn, children_playing, dog_bark, drilling, engine_idling, gun_shot, jackhammer, siren, street_music. ##BEFORE YOU DOWNLOAD: AVOID COMMON PITFALLS! Since releasing the dataset we have noticed a couple of common mistakes that could invalidate your results, potentially leading to manuscripts being rejected or the publication of incorrect results. To avoid this, please read the following carefully: 1. Don't reshuffle the data! Use the predefined 10 folds and perform 10-fold (not 5-fold) cross validation The experiments conducted by vast majority of publications using UrbanSound8K (by ourselves and others) evaluate classification models via 10-fold cross validation using the predefined splits*. We strongly recommend following this procedure. Why? If you reshuffle the data (e.g. combine the data from all folds and generate a random train/test split) you will be incorrectly placing related samples in both the train and test sets, leading to inflated scores that don't represent your model's performance on unseen data. Put simply, your results will be wrong. 
Your results will NOT be comparable to previous results in the literature, meaning any claims to an improvement on previous research will be invalid. Even if you don't reshuffle the data, evaluating using different splits (e.g. 5-fold cross validation) will mean your results are not comparable to previous research. 2. Don't evaluate just on one split! Use 10-fold (not 5-fold) cross validation and average the scores We have seen reports that only provide results for a single train/test split, e.g. train on folds 1-9, test on fold 10 and report a single accuracy score. We strongly advise against this. Instead, perform 10-fold cross validation using the provided folds and report the average score. Why? Not all the splits are as "easy". That is, models tend to obtain much higher scores when trained on folds 1-9 and tested on fold 10, compared to (e.g.) training on folds 2-10 and testing on fold 1. For this reason, it is important to evaluate your model on each of the 10 splits and report the average accuracy. Again, your results will NOT be comparable to previous results in the literature. ## Acknowledgements We kindly request that articles and other works in which this dataset is used cite the following paper: J. Salamon, C. Jacoby and J. P. Bello, "A Dataset and Taxonomy for Urban Sound Research", 22nd ACM International Conference on Multimedia, Orlando USA, Nov. 2014. More information at https://urbansounddataset.weebly.com/urbansound8k.html Kaggle dataset identifier: urbansound8k <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Load data sounds_df = pd.read_csv("/kaggle/input/urbansound8k/UrbanSound8K.csv") sounds_df.head() sounds_freq = sounds_df["class"].value_counts().sort_values() print(sounds_freq) sounds_freq.plot( kind="pie", figsize=(5, 5), title="Sounds", autopct="%1.1f%%", shadow=False, fontsize=8, ) folds_freq = sounds_df["fold"].value_counts().sort_index() print(folds_freq) folds_freq.plot( kind="pie", figsize=(5, 5), title="Folds", autopct="%1.1f%%", shadow=False, fontsize=8, ) import matplotlib.pyplot as plt plt.figure(figsize=[25, 10]) for i in range(1, 11): fold_df = sounds_df[sounds_df["fold"] == i] fold_freq = fold_df["class"].value_counts() plt.subplot(2, 5, i) fold_freq.plot( kind="pie", title=f"fold {i}", autopct="%1.1f%%", shadow=False, fontsize=8 ) import librosa from scipy.stats import skew from scipy.stats import kurtosis def get_mfcc(filename, fold): wave, sr = librosa.load( f"../input/urbansound8k/fold{fold}/{filename}", mono=True, sr=22050 ) wave = librosa.util.normalize(wave) mfccs = librosa.feature.mfcc( y=wave, sr=sr, n_mfcc=40, hop_length=int(0.0232 * sr / 2.0), n_fft=int(0.0232 * sr), ) mfccs_min = mfccs.min(axis=1) mfccs_max = mfccs.max(axis=1) mfccs_median = np.median(mfccs, axis=1) mfccs_mean = np.mean(mfccs, axis=1) mfccs_var = np.var(mfccs, axis=1) mfccs_skewness = skew(mfccs, axis=1) mfccs_kurtosis = kurtosis(mfccs, axis=1) mfccs_first_derivative = np.diff(mfccs, n=1, axis=1) mfccs_first_derivative_mean = np.mean(mfccs_first_derivative, axis=1) mfccs_first_derivative_var = np.var(mfccs_first_derivative, axis=1) mfccs_second_derivative = np.diff(mfccs, n=2, axis=1) mfccs_second_derivative_mean = np.mean(mfccs_second_derivative, axis=1) mfccs_second_derivative_var = np.var(mfccs_second_derivative, axis=1) mfccs_stats = np.vstack( ( mfccs_min, mfccs_max, mfccs_median, mfccs_mean, mfccs_var, mfccs_skewness, mfccs_kurtosis, mfccs_first_derivative_mean, mfccs_first_derivative_var, 
            mfccs_second_derivative_mean,
            mfccs_second_derivative_var,
        )
    )
    return pd.Series([mfccs, mfccs_stats.transpose()])


sounds_df["duration"] = sounds_df["end"] - sounds_df["start"]
sounds_df.plot.hist(bins=10, column=["duration"], by="class", figsize=(5, 20))
plt.tight_layout()
from tqdm import tqdm

tqdm.pandas()
sounds_df[["mfccs", "mfccs_stats"]] = sounds_df[
    ["slice_file_name", "fold"]
].progress_apply(lambda x: get_mfcc(*x), axis=1)
import librosa.display  # explicit import: older librosa releases do not expose `display` via `import librosa`

plt.figure(figsize=[15, 10])
for i in range(0, 9):
    ax = plt.subplot(3, 3, i + 1)
    img = librosa.display.specshow(sounds_df["mfccs"][i], x_axis="time")
    ax.set(title=sounds_df["class"][i])
    plt.colorbar(img)  # pass the mesh explicitly so the colorbar has a mappable to attach to
plt.tight_layout()
sounds_df.head()
max_length = sounds_df["mfccs_stats"][0].shape
print(max_length)
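# The UrbanSound8K description above insists on evaluating with the ten predefined folds and
# averaging the scores. The sketch below illustrates that protocol on top of the `mfccs_stats`
# features computed above; flattening each (40 coefficients x 11 statistics) matrix and using a
# RandomForest classifier are assumptions for illustration, not part of the original notebook.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

X = np.stack([stats.flatten() for stats in sounds_df["mfccs_stats"]])  # (n_clips, 440)
y = sounds_df["classID"].values
folds = sounds_df["fold"].values

scores = []
for held_out in range(1, 11):  # leave one predefined fold out at a time
    train_mask = folds != held_out
    clf = RandomForestClassifier(n_estimators=200, random_state=0)
    clf.fit(X[train_mask], y[train_mask])
    scores.append(accuracy_score(y[~train_mask], clf.predict(X[~train_mask])))

print("per-fold accuracy:", np.round(scores, 3))
print(f"mean 10-fold accuracy: {np.mean(scores):.3f}")  # report the average, never a single split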
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/011/129011803.ipynb
urbansound8k
chrisfilo
[{"Id": 129011803, "ScriptId": 38308333, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2493382, "CreationDate": "05/10/2023 09:53:14", "VersionNumber": 2.0, "Title": "UrbanSound8K Classifiction", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 84.0, "LinesInsertedFromPrevious": 27.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 57.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184692157, "KernelVersionId": 129011803, "SourceDatasetVersionId": 928025}]
[{"Id": 928025, "DatasetId": 500970, "DatasourceVersionId": 955383, "CreatorUserId": 2102373, "LicenseName": "Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)", "CreationDate": "02/04/2020 18:37:24", "VersionNumber": 1.0, "Title": "UrbanSound8K", "Slug": "urbansound8k", "Subtitle": "8732 labeled sound excerpts", "Description": "This dataset contains 8732 labeled sound excerpts (&lt;=4s) of urban sounds from 10 classes: `air_conditioner`, `car_horn`, `children_playing`, `dog_bark`, `drilling`, `enginge_idling`, `gun_shot`, `jackhammer`, `siren`, and `street_music`. The classes are drawn from the urban sound taxonomy. For a detailed description of the dataset and how it was compiled please refer to our paper.\nAll excerpts are taken from field recordings uploaded to www.freesound.org. The files are pre-sorted into ten folds (folders named fold1-fold10) to help in the reproduction of and comparison with the automatic classification results reported in the article above.\n\nIn addition to the sound excerpts, a CSV file containing metadata about each excerpt is also provided.\n\n## AUDIO FILES INCLUDED\n\n8732 audio files of urban sounds (see description above) in WAV format. The sampling rate, bit depth, and number of channels are the same as those of the original file uploaded to Freesound (and hence may vary from file to file).\n\n##META-DATA FILES INCLUDED\n```\nUrbanSound8k.csv\n\n```\nThis file contains meta-data information about every audio file in the dataset. This includes:\n\n* slice_file_name: \nThe name of the audio file. The name takes the following format: [fsID]-[classID]-[occurrenceID]-[sliceID].wav, where:\n[fsID] = the Freesound ID of the recording from which this excerpt (slice) is taken\n[classID] = a numeric identifier of the sound class (see description of classID below for further details)\n[occurrenceID] = a numeric identifier to distinguish different occurrences of the sound within the original recording\n[sliceID] = a numeric identifier to distinguish different slices taken from the same occurrence\n\n* fsID:\nThe Freesound ID of the recording from which this excerpt (slice) is taken\n\n* start\nThe start time of the slice in the original Freesound recording\n\n* end:\nThe end time of slice in the original Freesound recording\n\n* salience:\nA (subjective) salience rating of the sound. 1 = foreground, 2 = background.\n\n* fold:\nThe fold number (1-10) to which this file has been allocated.\n\n* classID:\nA numeric identifier of the sound class:\n0 = air_conditioner\n1 = car_horn\n2 = children_playing\n3 = dog_bark\n4 = drilling\n5 = engine_idling\n6 = gun_shot\n7 = jackhammer\n8 = siren\n9 = street_music\n\n* class:\nThe class name: air_conditioner, car_horn, children_playing, dog_bark, drilling, engine_idling, gun_shot, jackhammer, \nsiren, street_music.\n\n##BEFORE YOU DOWNLOAD: AVOID COMMON PITFALLS!\n\nSince releasing the dataset we have noticed a couple of common mistakes that could invalidate your results, potentially leading to manuscripts being rejected or the publication of incorrect results. To avoid this, please read the following carefully:\n\n1. Don't reshuffle the data! Use the predefined 10 folds and perform 10-fold (not 5-fold) cross validation\nThe experiments conducted by vast majority of publications using UrbanSound8K (by ourselves and others) evaluate classification models via 10-fold cross validation using the predefined splits*. We strongly recommend following this procedure.\n\nWhy?\nIf you reshuffle the data (e.g. 
combine the data from all folds and generate a random train/test split) you will be incorrectly placing related samples in both the train and test sets, leading to inflated scores that don't represent your model's performance on unseen data. Put simply, your results will be wrong.\nYour results will NOT be comparable to previous results in the literature, meaning any claims to an improvement on previous research will be invalid. Even if you don't reshuffle the data, evaluating using different splits (e.g. 5-fold cross validation) will mean your results are not comparable to previous research.\n\n2. Don't evaluate just on one split! Use 10-fold (not 5-fold) cross validation and average the scores\nWe have seen reports that only provide results for a single train/test split, e.g. train on folds 1-9, test on fold 10 and report a single accuracy score. We strongly advise against this. Instead, perform 10-fold cross validation using the provided folds and report the average score.\n\nWhy?\nNot all the splits are as \"easy\". That is, models tend to obtain much higher scores when trained on folds 1-9 and tested on fold 10, compared to (e.g.) training on folds 2-10 and testing on fold 1. For this reason, it is important to evaluate your model on each of the 10 splits and report the average accuracy.\nAgain, your results will NOT be comparable to previous results in the literature.\n\n\n## Acknowledgements\n\nWe kindly request that articles and other works in which this dataset is used cite the following paper:\n\nJ. Salamon, C. Jacoby and J. P. Bello, \"A Dataset and Taxonomy for Urban Sound Research\", 22nd ACM International Conference on Multimedia, Orlando USA, Nov. 2014.\n\nMore information at https://urbansounddataset.weebly.com/urbansound8k.html", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 500970, "CreatorUserId": 2102373, "OwnerUserId": 2102373.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 928025.0, "CurrentDatasourceVersionId": 955383.0, "ForumId": 514107, "Type": 2, "CreationDate": "02/04/2020 18:37:24", "LastActivityDate": "02/04/2020", "TotalViews": 70225, "TotalDownloads": 14601, "TotalVotes": 155, "TotalKernels": 137}]
[{"Id": 2102373, "UserName": "chrisfilo", "DisplayName": "Chris Gorgolewski", "RegisterDate": "07/26/2018", "PerformanceTier": 2}]
false
1
1,180
0
2,498
1,180
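# The metadata description for this record documents the slice_file_name pattern
# [fsID]-[classID]-[occurrenceID]-[sliceID].wav. A small illustrative helper for recovering those
# fields straight from a file name is sketched below; the notebook itself reads them from the CSV,
# and the helper name and example file name are assumptions, not part of the original code.
from pathlib import Path


def parse_slice_name(filename: str) -> dict:
    """Split an UrbanSound8K slice file name into its documented ID components."""
    fs_id, class_id, occurrence_id, slice_id = Path(filename).stem.split("-")
    return {
        "fsID": int(fs_id),
        "classID": int(class_id),
        "occurrenceID": int(occurrence_id),
        "sliceID": int(slice_id),
    }


print(parse_slice_name("100032-3-0-0.wav"))  # -> {'fsID': 100032, 'classID': 3, 'occurrenceID': 0, 'sliceID': 0}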
129011294
<jupyter_start><jupyter_text>Maadi Houses Prices Kaggle dataset identifier: maadi-houses-price <jupyter_script>import pandas as pd import numpy as np # # Analysis # # Data Wrangle # ## Gathering Data df = pd.read_csv( r"C:\Users\HP\Desktop\Maadi\Prediction-of-Maadi-homes-main\Maadi_Houses-main\Maadi_houses.csv" ) # ## Assessment Data and Find Issues # ### Programically df df.drop("Unnamed: 0", axis=1, inplace=True) df.describe() df.info() # ### Quality issues # - Sqm on area column # - Bathrooms on Bathrooms column # - Bedrooms on bedrooms column # - Egp on price column # - Data types # ### Tidy issues # - column place has two values # ## Cleaning # ### First Quality issues import re df["area_in_squared_meters"] = ( df["area"].apply(lambda x: re.search("\d+", x).group()).astype(int) ) df["price_in_egy"] = df["price"].apply(lambda x: str(x).split()[0]) df["bathrooms"] = ( df["bathrooms"].apply(lambda x: re.search("\d+", x).group()).astype(int) ) df["bedrooms"] = df["bedrooms"].apply(lambda x: re.search("\d+", x).group()).astype(int) df = df.drop(columns=["area", "price"]) df["price_in_egy"] = df["price_in_egy"].apply(lambda x: x.replace(",", "")).astype(int) df.dtypes # All Quality issues done # ### Tidy issues df["district"] = df["place"].apply(lambda x: x.split(",")[0]) df["talukas"] = df["place"].apply(lambda x: x.split(",")[1]) df = df.drop(columns="place") # # EDA import matplotlib.pyplot as plt import seaborn as sns sns.set() # # Univarite Analysis # ### Categrical Columns fig, ax = plt.subplots(figsize=(14, 8)) df["district"].value_counts().plot(kind="barh", ax=ax) plt.xlabel("Frequency", fontsize=18) plt.ylabel("District", fontsize=18) plt.title("District Freuqncies", fontsize=20) plt.box(False) plt.show() # - We need Maadi Only df = df[df["district"] == "Maadi"] fig, ax = plt.subplots(figsize=(14, 8)) df["district"].value_counts().plot(kind="barh", width=0.1, ax=ax) plt.xlabel("Frequency", fontsize=18) plt.ylabel("District", fontsize=18) plt.title("Frequency of district", fontsize=20) plt.box(False) plt.show() fig, ax = plt.subplots(figsize=(14, 8)) df["talukas"].value_counts().plot(kind="barh", ax=ax) plt.xlabel("Frequency") plt.ylabel("Talukas") plt.title("Frequency of talukas") plt.box(False) plt.show() # - Old Maadi & Maadi Zahraa & Maadi Cornishe & New Maadi Will not help When applying ML Model will be Missclassified or will not choosed on Training Data set so it isn't useful enough we can drop it or we can take more data about it by it will take more time so we do this now by dropping this records ( low cardinality ) # - So Limitations for this analysis are Maadi Degla and Maadi Sarayat only df = df[ df["talukas"].isin(["Old Maadi", "Maadi Zahraa", "Maadi Cornishe", "New Maadi"]) == False ] fig, ax = plt.subplots(figsize=(14, 8)) df["talukas"].value_counts().plot(kind="barh", ax=ax) plt.xlabel("Frequency", fontsize=18) plt.ylabel("Talukas", fontsize=18) plt.title("Frequency of talukas", fontsize=20) plt.box(False) plt.show() fig, ax = plt.subplots(figsize=(14, 8)) df["finishing_level"].value_counts().plot(kind="barh", ax=ax) plt.xlabel("Frequency", fontsize=18) plt.ylabel("Finishing Level", fontsize=18) plt.title("Frequency of Finishing Level", fontsize=20) plt.box(False) plt.show() # - New Limitation Semi Finished and Finished will not help us ,because we have small amout of informations about it ( low cardinality ) # - Our Limitations are Unfurnished ,Modern Furnished ,Furnished ,Semi Furnished df = df[df["finishing_level"].isin(["Semi Finished", "Finished"]) == False] fig, 
ax = plt.subplots(figsize=(14, 8)) df["properties"].value_counts().plot(kind="barh", ax=ax) plt.xlabel("Frequency", fontsize=18) plt.ylabel("Properties", fontsize=18) plt.title("Frequency of Properties", fontsize=20) plt.box(False) plt.show() # - New Limitation Villas and Twin Houses will not help us ,we have small amount of informaitons about it ( low cardinality ) # - Our Limitations are Penthouses ,Duplexes ,Ground Floors ,Apartments df = df[df["properties"].isin(["Villas", "Twin Houses"]) == False] # ## Numerical Columns Numerical_Columns = [_ for _ in df.select_dtypes(include=["int"]).columns] Numerical_Columns.remove("bathrooms") Numerical_Columns.remove("bedrooms") fig, ax = plt.subplots( len(Numerical_Columns), 1, figsize=(14, 8), constrained_layout=True ) for _, _1 in zip(Numerical_Columns, ax): _1.hist(df[_], alpha=0.5) _1.set_xlabel(_) _1.set_ylabel("Frequency") _1.set_title(f"{_} Distribution") _1.spines["top"].set_color("none") _1.spines["bottom"].set_color("none") _1.spines["left"].set_color("none") _1.spines["right"].set_color("none") # - On Prices,area Column there are few Outliers ,i can reduce their impact using Robust Scaler which is combining of Medium and IQR # # Bivariate Analysis X = df.drop("price_in_egy", axis=1) y = df["price_in_egy"] Num_Columns = [] Cat_Columns = [] for _ in X.columns: if X[_].dtype == "int": Num_Columns.append(_) else: Cat_Columns.append(_) fig, ax = plt.subplots(figsize=(14, 8)) plt.scatter(df[Num_Columns[0]], df["price_in_egy"]) plt.xlabel("Bathrooms", fontsize=18) plt.ylabel("Prices", fontsize=18) plt.title("Bathroom vs Prices", fontsize=18) plt.box(False) plt.show() fig, ax = plt.subplots(figsize=(14, 8)) plt.scatter(df[Num_Columns[1]], df["price_in_egy"]) plt.xlabel("Bedrooms", fontsize=18) plt.ylabel("Prices", fontsize=18) plt.title("Bedrooms vs Prices", fontsize=18) plt.box(False) plt.show() fig, ax = plt.subplots(figsize=(14, 8)) plt.scatter(df[Num_Columns[2]], df["price_in_egy"]) plt.xlabel("Area", fontsize=18) plt.ylabel("Prices", fontsize=18) plt.title("Area vs Prices", fontsize=18) plt.box(False) plt.show() # Now it's pretty, good views,normal data and positive correlations between label and features # # ML from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import OneHotEncoder from sklearn.pipeline import Pipeline from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score, KFold from sklearn.model_selection import GridSearchCV from sklearn.compose import ColumnTransformer from sklearn.metrics import make_scorer, mean_squared_error, r2_score from sklearn.linear_model import Ridge Num_trans = Pipeline([("Scaling", RobustScaler())]) Cat_trans = Pipeline([("OneHotEncoding", OneHotEncoder(drop="first"))]) Preprocessing = ColumnTransformer( [("cat", Cat_trans, Cat_Columns), ("num", Num_trans, Num_Columns)] ) X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=0.8, random_state=0 ) # # BaseLine Model baseline = mean_squared_error(y_train, [y_train.mean()] * len(y_train)) baseline # # Comparison Model mse = make_scorer(mean_squared_error) model = Pipeline([("Transformation", Preprocessing), ("model", Ridge())]) cv = cross_val_score( model, X_train, y_train, cv=KFold(n_splits=8, shuffle=True, random_state=0), n_jobs=-1, scoring=mse, ) cv_mse = np.mean(cv) model.fit(X_train, y_train) train_mse = mean_squared_error(y_train, model.predict(X_train)) np.std(cv), np.mean(cv) fig, ax = plt.subplots(figsize=(14, 8)) sns.boxplot(y=cv) plt.box(False) plt.show() # # 
Comparison Between Baseline and Comparison Model fig, ax = plt.subplots(figsize=(14, 8)) plt.axhline(baseline, color="red", label="BaseLine") plt.axhline(train_mse, color="blue", label="Train") plt.axhline(cv_mse, color="orange", label="Cross Validation") plt.box(False) plt.legend() plt.show() # - Our Model Performs well # - cross validation and training msq are less than Baseline Model # - The CV varies a lots # # Final Evaluate fig, ax = plt.subplots(figsize=(14, 8)) plt.axhline(baseline, color="red", label="Baseline") plt.axhline(train_mse, color="black", label="Train") plt.axhline(cv_mse, color="purple", label="CV Score") plt.axhline( mean_squared_error(y_test, model.predict(X_test)), color="green", label="Final Evaluation", ) plt.box(False) plt.legend() plt.show() # - Testset also perform very well than Cross Validation # # Tuning param_grid = dict(model__alpha=[0.00001, 0.0001, 0.001, 0.01, 0.1]) GS = GridSearchCV( model, param_grid=param_grid, scoring=mse, n_jobs=-1, cv=KFold(n_splits=8, shuffle=True, random_state=0), ) GS.fit(X_train, y_train) GS.best_params_ fig, ax = plt.subplots(figsize=(14, 8)) plt.axhline( mean_squared_error(y_test, model.predict(X_test)), color="green", label="Before Tuning", ) plt.axhline( mean_squared_error(y_test, GS.best_estimator_.predict(X_test)), color="purple", label="After Tuning", ) plt.box(False) plt.legend() plt.show() df1 = pd.DataFrame( {"predicts": GS.best_estimator_.predict(X_test), "actual_values": y_test} ) fig, ax = plt.subplots(figsize=(14, 8)) ax = sns.kdeplot(df1["predicts"], label="Predicitons of test set") sns.kdeplot(df1["actual_values"], label="Actual Values of test set", ax=ax) sns.despine(left=True, right=True, top=True, bottom=True) plt.xlabel("Predictions") plt.legend() plt.show() # # Check Assumptions # ## Target Distribution is Normal from scipy.stats import shapiro # test the normality of the variable stat, p = shapiro(y) # interpret the p-value alpha = 0.05 if p > alpha: print("Data looks normally distributed (fail to reject H0)") else: print("Data does not look normally distributed (reject H0)") plt.subplots(figsize=(14, 8)) sns.histplot(y) plt.xlabel("Prices") plt.ylabel("Frequency") plt.box(False) plt.title("Prices Distribtuion") plt.show() # - the assumption is violated from sklearn.preprocessing import PowerTransformer # ## Relation Between Features and Target is Linear fig, ax = plt.subplots(figsize=(14, 8)) sns.scatterplot(x=y_train, y=GS.best_estimator_.predict(X_train)) plt.box(False) plt.xlabel("Actual Values") plt.ylabel("Predictions") plt.show() # - First Assumption is met # ## The Distribution of Residuals is Normal # test the normality of the variable stat, p = shapiro( np.array(y_train).reshape(-1, 1) - GS.best_estimator_.predict(X_train) ) # interpret the p-value alpha = 0.05 if p > alpha: print("Data looks normally distributed (fail to reject H0)") else: print("Data does not look normally distributed (reject H0)") plt.subplots(figsize=(14, 8)) sns.histplot(y_train - GS.best_estimator_.predict(X_train)) plt.xlabel("Resiudals") plt.ylabel("Frequency") plt.title("Residuals Distribtion") plt.box(False) plt.show() # - Second assumption is violated # ## Independent and Homoscedasticity import statsmodels.api as sm from statsmodels.graphics.tsaplots import plot_acf plt.subplots(figsize=(14, 8)) sns.scatterplot(x=y_train, y=y_train - GS.best_estimator_.predict(X_train)) plt.axhline(0) plt.box(False) plt.show() acf = sm.tsa.acf(y_train - GS.best_estimator_.predict(X_train), nlags=50) # compute the standard error of 
the ACF n = len(y_train - GS.best_estimator_.predict(X_train)) stderr = 1.96 / np.sqrt(n) # plot the ACF with confidence intervals fig, ax = plt.subplots(figsize=(14, 8)) ax.stem(acf) ax.axhline(y=stderr, linestyle="--", color="gray") ax.axhline(y=-stderr, linestyle="--", color="gray") plt.box(False) plt.show() test = sm.stats.stattools.durbin_watson(y_train - GS.best_estimator_.predict(X_train)) print(f"Durbin Watson Test Score {test}") # - Independence is met ,the variance is constant or about to be constant is also met # ## Multicolinearity VIFS = pd.get_dummies(X, drop_first=True) from statsmodels.stats.outliers_influence import variance_inflation_factor # VIF dataframe VIFS = pd.get_dummies(X, drop_first=True) vif_data = pd.DataFrame() vif_data["feature"] = VIFS.columns # calculating VIF for each feature vif_data["VIF"] = [ variance_inflation_factor(VIFS.values, i) for i in range(len(VIFS.columns)) ] vif_data # - baseline 20 X = X.drop(columns="bedrooms") from statsmodels.stats.outliers_influence import variance_inflation_factor # VIF dataframe VIFS = pd.get_dummies(X, drop_first=True) vif_data = pd.DataFrame() vif_data["feature"] = VIFS.columns # calculating VIF for each feature vif_data["VIF"] = [ variance_inflation_factor(VIFS.values, i) for i in range(len(VIFS.columns)) ] vif_data Num_Columns.remove("bedrooms") # # Iterative Process Num_trans = Pipeline([("Scaling", RobustScaler())]) Cat_trans = Pipeline([("OneHotEncoding", OneHotEncoder(drop="first"))]) Preprocessing = ColumnTransformer( [("cat", Cat_trans, Cat_Columns), ("num", Num_trans, Num_Columns)] ) X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=0.8, random_state=0 ) PT = PowerTransformer(method="box-cox") y_train_max = PT.fit_transform(np.array(y_train).reshape(-1, 1)) test, p = shapiro(y_train_max) # interpret the p-value alpha = 0.05 if p > alpha: print("Data looks normally distributed (fail to reject H0)") else: print("Data does not look normally distributed (reject H0)") Baseline1 = mean_squared_error(y_train_max, [np.mean(y_train_max)] * len(y_train_max)) model = Pipeline([("Transformation", Preprocessing), ("model", Ridge())]) cv = cross_val_score( model, X_train, y_train_max, cv=KFold(n_splits=8, shuffle=True, random_state=0), n_jobs=-1, scoring=mse, ) msq_cv = np.mean(cv) model.fit(X_train, y_train_max) msq_train = mean_squared_error(y_train_max, model.predict(X_train)) # # Evaluate fig, ax = plt.subplots(figsize=(14, 8)) plt.axhline(Baseline1, color="red", label="BaseLine") plt.axhline(msq_train, color="blue", label="Train") plt.axhline(msq_cv, color="green", label="Cross Validation") plt.box(False) plt.legend() plt.show() # # Final Evaluate fig, ax = plt.subplots(figsize=(14, 8)) plt.axhline(Baseline1, color="red", label="BaseLine") plt.axhline(msq_train, color="orange", label="Training") plt.axhline(msq_cv, color="blue", label="Cross Validation") plt.axhline( mean_squared_error( PT.transform(np.array(y_test).reshape(-1, 1)), model.predict(X_test) ), color="green", label="Test", ) plt.box(False) plt.legend() plt.show() # # Tuning param_grid = dict(model__alpha=[0.00001, 0.0001, 0.001, 0.01, 0.1]) GS = GridSearchCV( model, param_grid=param_grid, scoring=mse, n_jobs=-1, cv=KFold(n_splits=8, shuffle=True, random_state=0), ) GS.fit(X_train, y_train_max) plt.subplots(figsize=(14, 8)) plt.axhline( mean_squared_error( PT.transform(np.array(y_test).reshape(-1, 1)), model.predict(X_test) ), color="green", label="Old", ) plt.axhline( mean_squared_error( PT.transform(np.array(y_test).reshape(-1, 1)), 
GS.fit(X_train, y_train_max).predict(X_test), ), color="purple", label="New", ) plt.box(False) plt.legend() plt.show() # # Check Assumptions # ## Relation Between Features and Target is Linear fig, ax = plt.subplots(figsize=(14, 8)) sns.scatterplot( x=pd.DataFrame(y_train_max)[0], y=pd.DataFrame(GS.best_estimator_.predict(X_train))[0], ) plt.box(False) plt.xlabel("Actual Values") plt.ylabel("Predictions") plt.show() # ## Independent and Homoscedasticity plt.subplots(figsize=(14, 8)) sns.scatterplot( x=pd.DataFrame(y_train_max)[0], y=pd.DataFrame(y_train_max)[0] - pd.DataFrame(GS.best_estimator_.predict(X_train))[0], ) plt.axhline(0) plt.box(False) plt.show() acf = sm.tsa.acf( pd.DataFrame(y_train_max)[0] - pd.DataFrame(GS.best_estimator_.predict(X_train))[0], nlags=50, ) # compute the standard error of the ACF n = len(y_train_max - GS.best_estimator_.predict(X_train)) stderr = 1.96 / np.sqrt(n) # plot the ACF with confidence intervals fig, ax = plt.subplots(figsize=(14, 8)) ax.stem(acf) ax.axhline(y=stderr, linestyle="--", color="gray") ax.axhline(y=-stderr, linestyle="--", color="gray") plt.box(False) plt.show() test = sm.stats.stattools.durbin_watson( pd.DataFrame(y_train_max)[0] - pd.DataFrame(GS.best_estimator_.predict(X_train))[0] ) print(f"Durbin Watson Test Score {test}") # ## The Distribution of Residuals is Normal plt.subplots(figsize=(14, 8)) sns.histplot( pd.DataFrame(y_train_max)[0] - pd.DataFrame(GS.best_estimator_.predict(X_train))[0] ) plt.xlabel("Resiudals") plt.ylabel("Frequency") plt.title("Residuals Distribtion") plt.box(False) plt.show() # test the normality of the variable stat, p = shapiro( pd.DataFrame(y_train_max)[0] - pd.DataFrame(GS.best_estimator_.predict(X_train))[0] ) # interpret the p-value alpha = 0.05 if p > alpha: print("Data looks normally distributed (fail to reject H0)") else: print("Data does not look normally distributed (reject H0)") # - the model doesn't meet only one assumption df2 = pd.merge( pd.DataFrame(GS.best_estimator_.predict(X_test), columns=["Predictions"]), pd.DataFrame( PT.transform(np.array(y_test).reshape(-1, 1)), columns=["Actual Values"] ), left_index=True, right_index=True, ) df2[["Predictions"]] = PT.inverse_transform(df2[["Predictions"]]) df2[["Actual Values"]] = PT.inverse_transform(df2[["Actual Values"]]) fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(14, 8)) sns.kdeplot(df2["Predictions"], label="Predictions", ax=ax0) sns.kdeplot(df2["Actual Values"], ax=ax0, label="Actual Values") ax0.set_title("New") sns.kdeplot(df1["predicts"], label="Predictions", ax=ax1) sns.kdeplot(df1["actual_values"], ax=ax1, label="Actual Values") ax1.set_title("Old") plt.box(False) plt.legend() plt.show() if mean_squared_error(df2["Actual Values"], df2["Predictions"]) < mean_squared_error( df1["actual_values"], df1["predicts"] ): print("Updates are reducing the error") else: print("Updates are not reducing the error") # # Non Parametric Model from xgboost import XGBRegressor model = Pipeline([("Transformation", Preprocessing), ("model", XGBRegressor())]) cv = cross_val_score( model, X_train, y_train, cv=KFold(n_splits=8, shuffle=True, random_state=0), n_jobs=-1, scoring=mse, ) msq_cv = np.mean(cv) model.fit(X_train, y_train) msq_train = mean_squared_error(y_train, model.predict(X_train)) fig, ax = plt.subplots(figsize=(14, 8)) plt.axhline(baseline, color="red", label="BaseLine") plt.axhline(msq_train, color="blue", label="Train") plt.axhline(msq_cv, color="green", label="Cross Validation") plt.box(False) plt.legend() plt.show() fig, ax = 
plt.subplots(figsize=(14, 8)) plt.axhline(baseline, color="red", label="BaseLine") plt.axhline(msq_train, color="orange", label="Training") plt.axhline(msq_cv, color="blue", label="Cross Validation") plt.axhline( mean_squared_error(y_test, model.predict(X_test)), color="green", label="Test" ) plt.box(False) plt.legend() plt.show()
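# One caveat about the grid searches above: `make_scorer(mean_squared_error)` keeps the default
# `greater_is_better=True`, so `GridSearchCV` treats a larger MSE as a better score when picking
# `best_estimator_`. The sketch below shows the conventional setup with the direction made
# explicit; it reuses `Preprocessing`, `X_train` and `y_train` from the notebook above and is an
# illustrative alternative, not a claim about what the author intended.
from sklearn.linear_model import Ridge
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.pipeline import Pipeline

ridge_model = Pipeline([("Transformation", Preprocessing), ("model", Ridge())])
neg_mse = make_scorer(mean_squared_error, greater_is_better=False)  # scorer values are negated, so higher is better

search = GridSearchCV(
    ridge_model,
    param_grid={"model__alpha": [0.00001, 0.0001, 0.001, 0.01, 0.1]},
    scoring=neg_mse,  # equivalently: scoring="neg_mean_squared_error"
    cv=KFold(n_splits=8, shuffle=True, random_state=0),
    n_jobs=-1,
)
search.fit(X_train, y_train)
print(search.best_params_, -search.best_score_)  # flip the sign to read the best score as an MSE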
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/011/129011294.ipynb
maadi-houses-price
mahmoudadel2
[{"Id": 129011294, "ScriptId": 38349761, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9931887, "CreationDate": "05/10/2023 09:48:34", "VersionNumber": 3.0, "Title": "Maadi Houses Analysis + ML \ud83d\udd25", "EvaluationDate": "05/10/2023", "IsChange": false, "TotalLines": 609.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 609.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184691305, "KernelVersionId": 129011294, "SourceDatasetVersionId": 5653755}]
[{"Id": 5653755, "DatasetId": 3249691, "DatasourceVersionId": 5729133, "CreatorUserId": 9931887, "LicenseName": "Unknown", "CreationDate": "05/10/2023 09:36:32", "VersionNumber": 1.0, "Title": "Maadi Houses Prices", "Slug": "maadi-houses-price", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3249691, "CreatorUserId": 9931887, "OwnerUserId": 9931887.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5653755.0, "CurrentDatasourceVersionId": 5729133.0, "ForumId": 3315055, "Type": 2, "CreationDate": "05/10/2023 09:36:32", "LastActivityDate": "05/10/2023", "TotalViews": 39, "TotalDownloads": 5, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 9931887, "UserName": "mahmoudadel2", "DisplayName": "Mahmoud adel", "RegisterDate": "03/15/2022", "PerformanceTier": 1}]
false
0
6,517
0
6,544
6,517
129116185
<jupyter_start><jupyter_text>Smartphone Specifications and Prices in India # Contex : The dataset seems to contain information about different mobile phone models along with their specifications such as price, rating, sim type, processor, RAM, battery, display, camera, memory card support, and operating system. Source: The csv data was scraped from https://www.smartprix.com/mobiles Inspiration: You can build a recommender system or Price Prediction using the csv data. Kaggle dataset identifier: smartphone-specifications-and-prices-in-india <jupyter_script># importing libraries import warnings warnings.filterwarnings("ignore") import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns pd.set_option("display.max_columns", None) pd.options.display.max_colwidth = 150 plt.style.use("seaborn-colorblind") import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # DataFrame Styling Function def highlight_columns(df, columns_to_shadow=[]): highlight = lambda slice_of_df: "background-color: #D8E8B0; color:black" sample_df = df.head(3) highlighted_df = sample_df.style.applymap( highlight, subset=pd.IndexSlice[:, columns_to_shadow] ) return display(highlighted_df) # # Importing Dataset For Cleaning # ## Importing and Analysing Data # importing data specs = pd.read_csv( "/kaggle/input/smartphone-specifications-and-prices-in-india/smartphones - smartphones.csv" ) display(specs.head()) print(specs.info()) # ## Take Null Rating Rows Out: # It's going to be a feature column, so can't use imputation and not wasting time cleaning these rows specs = specs.dropna(subset=["rating"]).reset_index(drop=True) display(specs.head()) specs.info() # ## Fix column values in Wrong Column # This part is done with manual inspection # Before Shifting # display(specs[~specs['battery'].str.contains('mAh')]) rows_to_shift1 = specs[~specs["battery"].str.contains("mAh")].index specs.iloc[rows_to_shift1, 6:-1] = specs.iloc[rows_to_shift1, 6:-1].shift(1, axis=1) # After Shifting # display(specs.iloc[rows_to_shift1]) # Before Shifting # display(specs[~specs['camera'].str.contains('MP')]) rows_to_shift2 = specs[~specs["camera"].str.contains("MP")].index specs.iloc[rows_to_shift2, 8:-1] = specs.iloc[rows_to_shift2, 8:].shift(-1, axis=1) # After Shifting # display(specs.iloc[rows_to_shift2]) # Before Shifting # display(specs[~specs['camera'].str.contains('MP')]) rows_to_fix1 = specs[specs["os"].str.contains("Memory")].index specs.iloc[rows_to_fix1, -1] = None # After Shifting # display(specs.iloc[rows_to_fix1]) # Before Shifting # display(specs[specs['card'].str.contains('Android')]) rows_to_fix2 = specs[specs["card"].str.contains("Android")].index specs.iloc[rows_to_fix2, 9:] = specs.iloc[rows_to_fix2, 9:].shift(1, axis=1) # After Shifting # display(specs.iloc[rows_to_fix2]) # # Fixing Columns to Extract Infos # ## 1. Price Column: # specs['price'] = specs['price'].str.replace('₹','').str.replace(',','').astype('int64') replacements = {"₹": "", ",": ""} specs["price"] = ( specs["price"] .str.replace(r"[₹,]", lambda x: replacements[x.group(0)], regex=True) .astype("int64") ) highlight_columns(specs, ["price"]) specs["price"].describe().round(2) # ## 2. 
Sim Column print("Available options:", specs[specs["sim"].str.len() == 57].sim.values[0]) # Most of the phone contains 3G,4G,VoLTE and does not contains Vo5g # So we'll ignore them, uncomment them to keep them # specs['3G'] = np.where(specs['sim'].str.contains('3G'),1,0) # specs['4G'] = np.where(specs['sim'].str.contains('4G'),1,0) # specs['VoLTE'] = np.where(specs['sim'].str.contains('VoLTE'),1,0) # specs['Vo5g'] = np.where(specs['sim'].str.contains('Vo5g'),1,0) specs["Number of Sim"] = np.where(specs["sim"].str.contains("Dual Sim"), 2, 1) specs["5G"] = np.where(specs["sim"].str.contains("5G"), 1, 0) specs["Wi-Fi"] = np.where(specs["sim"].str.contains("Wi-Fi"), 1, 0) specs["NFC"] = np.where(specs["sim"].str.contains("NFC"), 1, 0) specs["IR Blaster"] = np.where(specs["sim"].str.contains("IR Blaster"), 1, 0) specs = specs.drop(["sim"], axis=1) highlight_columns(specs, ["Number of Sim", "5G", "Wi-Fi", "NFC", "IR Blaster"]) # ## 3. Processor Column specs["Processor Name"] = specs["processor"].str.split(",", expand=True)[0] specs["Processor No. of Cores"] = specs["processor"].str.extract(r"(\w+)\s+Core") condition = [ specs["Processor No. of Cores"] == "Octa", specs["Processor No. of Cores"] == "Hexa", specs["Processor No. of Cores"] == "Quad", specs["Processor No. of Cores"] == "Dual", specs["Processor No. of Cores"] == "Single", ] values = [8, 6, 4, 2, 1] specs["Processor No. of Cores"] = np.select(condition, values, default=np.NaN).astype( "int64" ) specs["Processor Speed"] = ( specs["processor"].str.extract(r"(\d+\.\d*)\s+GHz").astype("float64") ) specs["Processor Speed"] = specs["Processor Speed"].fillna( value=specs["Processor Speed"].mode()[0] ) specs.drop(columns=["processor"], inplace=True) highlight_columns( specs, ["Processor Name", "Processor No. of Cores", "Processor Speed"] ) # ## 4. Ram Column specs["Ram Size(GB)"] = specs["ram"].str.extract(r"(\d+)\s+GB+\s+RAM").astype("float64") specs["ram"] = specs["ram"].str.replace(r"1 TB", r"1000 GB") specs["Rom Size(GB)"] = ( specs["ram"].str.extract(r"(\d+)\s+GB+\s+inbuilt").astype("float64") ) specs["Ram Size(GB)"] = specs["Ram Size(GB)"].fillna( value=specs["Ram Size(GB)"].mode()[0] ) specs.drop(columns=["ram"], inplace=True) highlight_columns(specs, ["Ram Size(GB)", "Rom Size(GB)"]) # ## 5. Battery Size specs["Battery Capacity"] = ( specs["battery"].str.extract(r"(\d+)\s+mAh").astype("float64") ) specs["Battery Capacity"] = specs["Battery Capacity"].fillna( value=specs["Battery Capacity"].mode()[0] ) # Values not containing Fast Charging may have Fast Charging in real life. So ignoring this feature # specs['Fast Charging'] = np.where(specs['battery'].str.contains('Fast Charging'),1,0) specs.drop(columns=["battery"], inplace=True) highlight_columns(specs, ["Battery Capacity"]) # ## 6. Display Column specs["Display Size(inches)"] = ( specs["display"].str.extract(r"(\d+\.\d*)\s+inches").astype("float64") ) specs["Resolution Width"] = specs["display"].str.extract(r"(\d+)\s*x").astype("int64") specs["Resolution Height"] = specs["display"].str.extract(r"(\d+)\s+px").astype("int64") specs["Display Refresh Rate"] = ( specs["display"].str.extract(r"(\d+)\s+Hz").fillna(value="60").astype("int64") ) specs["Display Size(inches)"] = specs["Display Size(inches)"].fillna( value=specs["Display Size(inches)"].mode()[0] ) specs.drop(columns=["display"], inplace=True) highlight_columns( specs, [ "Display Size(inches)", "Resolution Width", "Resolution Height", "Display Refresh Rate", ], ) # ## 7. 
Camera Column specs[["tr", "tf"]] = specs["camera"].str.split("Rear", expand=True) specs[["Rear Camera 1", "Rear Camera 2", "Rear Camera 3"]] = ( specs["tr"].str.extractall(r"(\d+)\s*MP").unstack().fillna(value=0).astype("int64") ) specs["Front Camera"] = ( specs["tf"].str.extract(r"(\d+)\s+MP").fillna(value=0).astype("int64") ) specs.drop(columns=["camera", "tr", "tf"], inplace=True) highlight_columns( specs, ["Rear Camera 1", "Rear Camera 2", "Rear Camera 3", "Front Camera"] ) # ## 8. Memory Card Column specs["External Card Support"] = np.where( specs["card"].str.contains("Not Supported"), 0, 1 ) specs.drop(columns=["card"], inplace=True) highlight_columns(specs, ["External Card Support"]) # ## 9. OS column replacements = { "v11.0": "v11", "v10.0": "v10", "v13.0": "v13", "v15.0": "v15", "No FM Radio": "Android", } specs["os"] = specs["os"].str.replace( r"v11.0|v10.0|v13.0|v15.0|No\s+FM\s+Radio", lambda x: replacements[x.group(0)], regex=True, ) specs["os"] = specs["os"].fillna(value="Android") highlight_columns(specs, ["os"]) # ## 10. Brand Name Column Creation specs["Brand"] = specs["model"].str.split(expand=True)[0] highlight_columns(specs, ["Brand"]) # # Create and Save New Clean DataFrame phones = specs[ [ "model", "Brand", "os", "Processor Name", "Processor No. of Cores", "Processor Speed", "Number of Sim", "5G", "Wi-Fi", "NFC", "IR Blaster", "Ram Size(GB)", "Rom Size(GB)", "Battery Capacity", "Display Size(inches)", "Resolution Width", "Resolution Height", "Display Refresh Rate", "Rear Camera 1", "Rear Camera 2", "Rear Camera 3", "Front Camera", "External Card Support", "price", "rating", ] ] display(phones.head()) phones.info() # # Exploratory Data Analysis # # Top 10 Brands with most devices phones_brand = phones["Brand"].value_counts()[:10] phones_brand.sort_values().plot(kind="barh") plt.show() # # Top Rated Phones phones[phones["rating"] == max(phones["rating"])][ ["model", "Brand", "rating"] ].sort_values("Brand") # # IOS vs Android vs Others (average rating and price) phones["os_name"] = np.where( phones["os"].str.contains("Android"), "Android", np.where(phones["os"].str.contains("iOS"), "iOS", "Others"), ) phones_os_price = phones.groupby("os_name")["price"].mean().sort_values(ascending=False) phones_os_rating = ( phones.groupby("os_name")["rating"].mean().sort_values(ascending=False) ) fig, ax_os = plt.subplots(ncols=2, figsize=(12, 6)) ax_os[0].bar(phones_os_price.index, phones_os_price.values) ax_os[1].bar(phones_os_rating.index, phones_os_rating.values) ax_os[0].set_xlabel("Average Price By OS") ax_os[1].set_xlabel("Average Rating By OS") fig.suptitle("IOS vs Android vs Others (average rating and price)") plt.show() # #### Comment: # Based on OS, the device ratings are almost identical, but Androids are much cheaper on average than the other OSes. # # Top Processor Brands phones["Processor Name"] = ( phones["Processor Name"] .str.split(expand=True)[0] .str.replace("Bionic", "Apple") .str.replace("Sanpdragon", "Snapdragon") ) phones_processor = ( phones.groupby("Processor Name")["rating"].mean().sort_values(ascending=False)[:5] ) plt.bar(phones_processor.index, phones_processor.values) plt.show() # # Distribution Graph of All the data cat_cols = [ "Processor No. 
of Cores", "Number of Sim", "5G", "NFC", "IR Blaster", "Ram Size(GB)", "Rom Size(GB)", "Display Refresh Rate", "External Card Support", ] fig, ax1 = plt.subplots(3, 3, figsize=(15, 15)) for n, col in enumerate(cat_cols): sns.countplot(data=phones, x=col, ax=ax1[n // 3, n % 3]) plt.show() # #### Comment: # **Processor's Number of Cores:** Almost all of the processors are octacore # **Number of Sim:** Almost all of the devices are dual sim. # **5G:** Most devices have 5G now, but around 40% of them still don't have it # **NFC, IR bluster:** They seems to be not in most devices. # **Ram:** Most devices got 4-8GB ram # **Rom:** 128GB seems to be the most popular Rom size as it's not too small and most of us don't need more than this # **Refresh Rate:** 60Hz is the most basic and common refresh rate. But smartphones are offering more refresh rates. num_cols = [ "Processor Speed", "Battery Capacity", "Display Size(inches)", "Resolution Width", "Resolution Height", "Rear Camera 1", "Rear Camera 2", "Rear Camera 3", "Front Camera", "price", "rating", ] fig, ax2 = plt.subplots(3, 4, figsize=(16, 16)) for n, col in enumerate(num_cols): sns.boxplot(data=phones, x=col, ax=ax2[n // 4, n % 4]) plt.show() # #### Comment: # **Processor Speen:** It's within 2.2-2.5GHz range mostly # **Battery:** Most smartphone offers 5000mAh battery # **Display:** 6.5-6.6inch is the most common display sizes with 1080p display # **Camera:** Smartphone seems to offer 50MP back camera with 2more lenses. And 8/16MP camera on front # # Correlation Between Columns fig, ax = plt.subplots(figsize=(15, 15)) sns.heatmap(phones.select_dtypes(exclude="object").corr(), annot=True, ax=ax) plt.show() # #### Comment # Rating seems to be mostly correlated with 5G,Ram Size,Display Features and some with Camera. # # Price Column Distribution Without Outliers # print(phones['price'].describe()) price_q1 = np.quantile(phones["price"], 0.25) price_q2 = np.quantile(phones["price"], 0.75) price_iqr = price_q2 - price_q1 phones_price = phones[ (phones["price"] > (price_q1 - 1.5 * price_iqr)) & (phones["price"] < (price_q2 + 1.5 * price_iqr)) ] sns.histplot(data=phones_price, x="price", kde=True) plt.show() # #### Comment: # Most smartphones seems to be within the Budget range # # Different specs by price segments phones["price_segment"] = pd.cut( phones["price"], [0, 25000, 50000, 80000, np.inf], labels=["Budget", "Mid-Range", "Premium", "Luxury"], ) phones_ps = phones.groupby("price_segment")[ [ "Processor Speed", "Battery Capacity", "rating", "Ram Size(GB)", "Rom Size(GB)", "Display Refresh Rate", ] ].mean() display(phones_ps) fig, ax3 = plt.subplots(2, 3, figsize=(16, 10)) for n, col in enumerate(phones_ps.columns): sns.barplot(data=phones_ps, y=col, x=phones_ps.index, ax=ax3[n // 3, n % 3]) ax3[n // 3, n % 3].set_xlabel("") plt.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/116/129116185.ipynb
smartphone-specifications-and-prices-in-india
shrutiambekar
[{"Id": 129116185, "ScriptId": 38322832, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13579284, "CreationDate": "05/11/2023 06:04:53", "VersionNumber": 1.0, "Title": "Smartphone Specs Data Cleaning And EDA", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 306.0, "LinesInsertedFromPrevious": 306.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 184886397, "KernelVersionId": 129116185, "SourceDatasetVersionId": 5390758}]
[{"Id": 5390758, "DatasetId": 3124911, "DatasourceVersionId": 5464470, "CreatorUserId": 11981028, "LicenseName": "CC0: Public Domain", "CreationDate": "04/13/2023 06:13:13", "VersionNumber": 1.0, "Title": "Smartphone Specifications and Prices in India", "Slug": "smartphone-specifications-and-prices-in-india", "Subtitle": "Dataset about Smartphones for Price Prediction", "Description": "# Contex :\nThe dataset seems to contain information about different mobile phone models along with their specifications such as price, rating, sim type, processor, RAM, battery, display, camera, memory card support, and operating system.\n\nSource: The csv data was scraped from https://www.smartprix.com/mobiles\n\nInspiration: You can build a recommender system or Price Prediction using the csv data.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3124911, "CreatorUserId": 11981028, "OwnerUserId": 11981028.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5390758.0, "CurrentDatasourceVersionId": 5464470.0, "ForumId": 3188482, "Type": 2, "CreationDate": "04/13/2023 06:13:13", "LastActivityDate": "04/13/2023", "TotalViews": 5616, "TotalDownloads": 1086, "TotalVotes": 29, "TotalKernels": 7}]
[{"Id": 11981028, "UserName": "shrutiambekar", "DisplayName": "shruti ambekar", "RegisterDate": "10/17/2022", "PerformanceTier": 2}]
false
1
4,479
2
4,613
4,479
129116247
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.style.use("bmh") plt.rcParams["figure.figsize"] = (10, 10) title_config = {"fontsize": 20, "y": 1.05} train = pd.read_csv("/kaggle/input/santander-customer-transaction-prediction/train.csv") train.head() X_train = train.iloc[:, 2:].values.astype("float64") y_train = train["target"].values pd.DataFrame(X_train[y_train == 0]).plot.kde(ind=100, legend=False) plt.title("Likelihood KDE Plots for the Negative Class", **title_config) pd.DataFrame(X_train[y_train == 1]).plot.kde(ind=100, legend=False) plt.title("Likelihood KDE Plots for the Positive Class", **title_config) from sklearn.preprocessing import StandardScaler scaled = pd.DataFrame(StandardScaler().fit_transform(X_train)) scaled[y_train == 0].plot.kde(ind=100, legend=False) plt.title( "Likelihood KDE Plots for the Negative Class after Standardization", **title_config ) scaled[y_train == 1].plot.kde(ind=100, legend=False) plt.title( "Likelihood KDE Plots for the Positive Class after Standardization", **title_config ) from sklearn.preprocessing import QuantileTransformer transformed = pd.DataFrame( QuantileTransformer(output_distribution="normal").fit_transform(X_train) ) transformed[y_train == 0].plot.kde(ind=100, legend=False) plt.title( "Likelihood KDE Plots for the Negative Class after Quantile Transformation", **title_config, ) plt.imshow(transformed.corr()) plt.colorbar() plt.title("Correlation Matrix Plot of the Features", **title_config) plt.hist(transformed[y_train == 0].mean() - transformed[y_train == 1].mean()) plt.title("Histogram of Sample Mean Differences between Two Classes", **title_config) plt.hist(transformed[y_train == 0].var() - transformed[y_train == 1].var()) plt.title( "Histogram of Sample Variance Differences between Two Classes", **title_config ) select = ( (transformed[y_train == 0].var() - transformed[y_train == 1].var()) .nsmallest(2) .index ) plt.scatter( transformed.loc[y_train == 0, select[0]], transformed.loc[y_train == 0, select[1]], alpha=0.5, label="Negative", ) plt.scatter( transformed.loc[y_train == 1, select[0]], transformed.loc[y_train == 1, select[1]], alpha=0.5, label="Positive", ) plt.xlabel(f"Transformed var_{select[0]}") plt.ylabel(f"Transformed var_{select[1]}") plt.title( "Positive Class Looks More Concentrated Despite Higher Sample Variance", **title_config, ) plt.legend() ( transformed.loc[y_train == 0, select[0]].mean() - transformed.loc[y_train == 1, select[0]].mean() ) ( transformed.loc[y_train == 0, select[1]].mean() - transformed.loc[y_train == 1, select[1]].mean() ) plt.scatter( X_train[y_train == 0, select[0]], X_train[y_train == 0, select[1]], alpha=0.5, label="Negative", ) plt.scatter( X_train[y_train == 1, select[0]], X_train[y_train == 1, select[1]], alpha=0.5, label="Positive", ) plt.xlabel(f"var_{select[0]}") 
plt.ylabel(f"var_{select[1]}") plt.title("Bounds in Data", **title_config) plt.legend() size0 = (y_train == 0).sum() size1 = y_train.size - size0 x0 = np.random.normal( transformed.loc[y_train == 0, select[0]].mean(), transformed.loc[y_train == 0, select[0]].std(), size=size0, ) y0 = np.random.normal( transformed.loc[y_train == 0, select[1]].mean(), transformed.loc[y_train == 0, select[1]].std(), size=size0, ) x1 = np.random.normal( transformed.loc[y_train == 1, select[0]].mean(), transformed.loc[y_train == 1, select[0]].std(), size=size1, ) y1 = np.random.normal( transformed.loc[y_train == 1, select[1]].mean(), transformed.loc[y_train == 1, select[1]].std(), size=size1, ) plt.scatter(x0, y0, alpha=0.5, label="Negative") plt.scatter(x1, y1, alpha=0.5, label="Positive") plt.xlabel(f"Simulated var_{select[0]}") plt.ylabel(f"Simulated var_{select[1]}") plt.title("Simulated Data for the Puzzle", **title_config) plt.legend() from sklearn.pipeline import make_pipeline from sklearn.naive_bayes import GaussianNB pipeline = make_pipeline( QuantileTransformer(output_distribution="normal"), GaussianNB() ) pipeline.fit(X_train, y_train) from sklearn.metrics import roc_curve, auc fpr, tpr, thr = roc_curve(y_train, pipeline.predict_proba(X_train)[:, 1]) plt.plot(fpr, tpr) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("Receiver Operating Characteristic Plot", **title_config) auc(fpr, tpr) from sklearn.model_selection import cross_val_score cross_val_score(pipeline, X_train, y_train, scoring="roc_auc", cv=10).mean() from sklearn.metrics import roc_auc_score pipeline.fit(X_train, y_train) model = pipeline.named_steps["gaussiannb"] size = 1000000 size0 = int(size * model.class_prior_[0]) size1 = size - size0 sample0 = np.concatenate( [ [np.random.normal(i, j, size=size0)] for i, j in zip(model.theta_[0], np.sqrt(model.sigma_[0])) ] ).T sample1 = np.concatenate( [ [np.random.normal(i, j, size=size1)] for i, j in zip(model.theta_[1], np.sqrt(model.sigma_[1])) ] ).T X_sample = np.concatenate([sample0, sample1]) y_sample = np.concatenate([np.zeros(size0), np.ones(size1)]) roc_auc_score(y_sample, model.predict_proba(X_sample)[:, 1]) test = pd.read_csv("/kaggle/input/santander-customer-transaction-prediction/test.csv") test.head() X_test = test.iloc[:, 1:].values.astype("float64") submission = pd.read_csv( "/kaggle/input/santander-customer-transaction-prediction/sample_submission.csv" ) submission["target"] = pipeline.predict_proba(X_test)[:, 1] submission.to_csv("submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/116/129116247.ipynb
null
null
[{"Id": 129116247, "ScriptId": 38119578, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14310511, "CreationDate": "05/11/2023 06:05:29", "VersionNumber": 1.0, "Title": "UTS NAVIE BAYUS", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 145.0, "LinesInsertedFromPrevious": 145.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,125
0
2,125
2,125
129116346
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import matplotlib.pyplot as plt import scipy.stats as stats import sklearn.linear_model as linear_model import seaborn as sns import xgboost as xgb from sklearn.model_selection import KFold from IPython.display import HTML, display from sklearn.manifold import TSNE from sklearn.cluster import KMeans from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler pd.options.display.max_rows = 1000 pd.options.display.max_columns = 20 # Reading training csv file for house-price-advanced-regression-techniques # Reading test csv file for house-price-advanced-regression-techniques train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv") quantitative = [f for f in train.columns if train.dtypes[f] != "object"] quantitative.remove("SalePrice") quantitative.remove("Id") qualitative = [f for f in train.columns if train.dtypes[f] == "object"] missing = train.isnull().sum() missing = missing[missing > 0] missing.sort_values(inplace=True) # Creating bar graph missing.plot.bar() import scipy.stats as st y = train["SalePrice"] # Create Johnson SU distribution graph of house sale price plt.figure(1) plt.title("Johnson SU") sns.distplot(y, kde=False, fit=st.johnsonsu) # Create normal distribution graph of house sale price plt.figure(2) plt.title("Normal") sns.distplot(y, kde=False, fit=st.norm) # Create log normal distribution graph of house sale price plt.figure(3) plt.title("Log Normal") sns.distplot(y, kde=False, fit=st.lognorm) # Sale Price does not follow a normal distribution. Best fit is the Johnson SU distribution test_normality = lambda x: stats.shapiro(x.fillna(0))[1] < 0.01 normal = pd.DataFrame(train[quantitative]) normal = normal.apply(test_normality) print(not normal.any()) # Quantitative variables do not have a normal distribution. 
They are transformed # Regression transformation smooths out irregularities f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, "value") # Check distribution of Sale Price for qualitative variables for q in qualitative: train[q] = train[q].astype("category") if train[q].isnull().any(): train[q] = train[q].cat.add_categories(["MISSING"]) train[q] = train[q].fillna("MISSING") # Create boxplot graphs def boxplot(x, y, **kwargs): sns.boxplot(x=x, y=y) x = plt.xticks(rotation=90) e = pd.melt(train, id_vars=["SalePrice"], value_vars=qualitative) f = sns.FacetGrid(e, col="variable", col_wrap=2, sharex=False, sharey=False) f = f.map(boxplot, "value", "SalePrice") # Neighborhood has a large effect on prices of houses # Estimates influence of categorical variables on house sale price def anova(frame): anv = pd.DataFrame() anv["feature"] = qualitative pvals = [] for c in qualitative: samples = [] for cls in frame[c].unique(): s = frame[frame[c] == cls]["SalePrice"].values samples.append(s) pval = stats.f_oneway(*samples)[1] pvals.append(pval) anv["pval"] = pvals return anv.sort_values("pval") a = anova(train) a["disparity"] = np.log(1.0 / a["pval"].values) sns.barplot(data=a, x="feature", y="disparity") x = plt.xticks(rotation=90) # encoding qualitative variables based on mean of house sale price def encode(frame, feature): ordering = pd.DataFrame() ordering["val"] = frame[feature].unique() ordering.index = ordering.val ordering["spmean"] = ( frame[[feature, "SalePrice"]].groupby(feature).mean()["SalePrice"] ) ordering = ordering.sort_values("spmean") ordering["ordering"] = range(1, ordering.shape[0] + 1) ordering = ordering["ordering"].to_dict() for cat, o in ordering.items(): frame.loc[frame[feature] == cat, feature + "_E"] = o qual_encoded = [] for q in qualitative: encode(train, q) qual_encoded.append(q + "_E") print(qual_encoded) # Correlations def spearman(frame, features): spr = pd.DataFrame() spr["feature"] = features spr["spearman"] = [frame[f].corr(frame["SalePrice"], "spearman") for f in features] spr = spr.sort_values("spearman") plt.figure(figsize=(6, 0.25 * len(features))) sns.barplot(data=spr, y="feature", x="spearman", orient="h") features = quantitative + qual_encoded spearman(train, features) # Correlations between variables # Garage area correlates with the number of cars plt.figure(1) corr = train[quantitative + ["SalePrice"]].corr() sns.heatmap(corr) plt.figure(2) corr = train[qual_encoded + ["SalePrice"]].corr() sns.heatmap(corr) plt.figure(3) corr = pd.DataFrame( np.zeros([len(quantitative) + 1, len(qual_encoded) + 1]), index=quantitative + ["SalePrice"], columns=qual_encoded + ["SalePrice"], ) for q1 in quantitative + ["SalePrice"]: for q2 in qual_encoded + ["SalePrice"]: corr.loc[q1, q2] = train[q1].corr(train[q2]) sns.heatmap(corr) # Comparing house sale price with independent variables def pairplot(x, y, **kwargs): ax = plt.gca() ts = pd.DataFrame({"time": x, "val": y}) ts = ts.groupby("time").mean() ts.plot(ax=ax) plt.xticks(rotation=90) f = pd.melt(train, id_vars=["SalePrice"], value_vars=quantitative + qual_encoded) g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False) g = g.map(pairplot, "value", "SalePrice") # Houses divided into 2 price groups (under 200000 and over 200000) features = quantitative standard = train[train["SalePrice"] < 200000] pricey = train[train["SalePrice"] >= 200000] diff = pd.DataFrame() diff["feature"] = features diff["difference"] = [ 
(pricey[f].fillna(0.0).mean() - standard[f].fillna(0.0).mean()) / (standard[f].fillna(0.0).mean()) for f in features ] sns.barplot(data=diff, x="feature", y="difference") x = plt.xticks(rotation=90) features = quantitative + qual_encoded model = TSNE(n_components=2, random_state=0, perplexity=50) X = train[features].fillna(0.0).values tsne = model.fit_transform(X) std = StandardScaler() s = std.fit_transform(X) pca = PCA(n_components=30) pca.fit(s) pc = pca.transform(s) kmeans = KMeans(n_clusters=5) kmeans.fit(pc) fr = pd.DataFrame({"tsne1": tsne[:, 0], "tsne2": tsne[:, 1], "cluster": kmeans.labels_}) sns.lmplot(data=fr, x="tsne1", y="tsne2", hue="cluster", fit_reg=False) print(np.sum(pca.explained_variance_ratio_)) y = train["SalePrice"].values def johnson(y): gamma, eta, epsilon, lbda = stats.johnsonsu.fit(y) yt = gamma + eta * np.arcsinh((y - epsilon) / lbda) return yt, gamma, eta, epsilon, lbda def johnson_inverse(y, gamma, eta, epsilon, lbda): return lbda * np.sinh((y - gamma) / eta) + epsilon yt, g, et, ep, l = johnson(y) yt2 = johnson_inverse(yt, g, et, ep, l) plt.figure(1) sns.distplot(yt) plt.figure(2) sns.distplot(yt2) # adding quadratic term to improve local score def error(actual, predicted): actual = np.log(actual) predicted = np.log(predicted) return np.sqrt(np.sum(np.square(actual - predicted)) / len(actual)) def log_transform(feature): train[feature] = np.log1p(train[feature].values) def quadratic(feature): train[feature + "2"] = train[feature] ** 2 log_transform("GrLivArea") log_transform("1stFlrSF") log_transform("2ndFlrSF") log_transform("TotalBsmtSF") log_transform("LotArea") log_transform("LotFrontage") log_transform("KitchenAbvGr") log_transform("GarageArea") quadratic("OverallQual") quadratic("YearBuilt") quadratic("YearRemodAdd") quadratic("TotalBsmtSF") quadratic("2ndFlrSF") quadratic("Neighborhood_E") quadratic("RoofMatl_E") quadratic("GrLivArea") qdr = [ "OverallQual2", "YearBuilt2", "YearRemodAdd2", "TotalBsmtSF2", "2ndFlrSF2", "Neighborhood_E2", "RoofMatl_E2", "GrLivArea2", ] train["HasBasement"] = train["TotalBsmtSF"].apply(lambda x: 1 if x > 0 else 0) train["HasGarage"] = train["GarageArea"].apply(lambda x: 1 if x > 0 else 0) train["Has2ndFloor"] = train["2ndFlrSF"].apply(lambda x: 1 if x > 0 else 0) train["HasMasVnr"] = train["MasVnrArea"].apply(lambda x: 1 if x > 0 else 0) train["HasWoodDeck"] = train["WoodDeckSF"].apply(lambda x: 1 if x > 0 else 0) train["HasPorch"] = train["OpenPorchSF"].apply(lambda x: 1 if x > 0 else 0) train["HasPool"] = train["PoolArea"].apply(lambda x: 1 if x > 0 else 0) train["IsNew"] = train["YearBuilt"].apply(lambda x: 1 if x > 2000 else 0) boolean = [ "HasBasement", "HasGarage", "Has2ndFloor", "HasMasVnr", "HasWoodDeck", "HasPorch", "HasPool", "IsNew", ] features = quantitative + qual_encoded + boolean + qdr lasso = linear_model.LassoLarsCV(max_iter=10000) X = train[features].fillna(0.0).values Y = train["SalePrice"].values lasso.fit(X, np.log(Y)) Ypred = np.exp(lasso.predict(X)) error(Y, Ypred) # dividing variables into splines import patsy Y, X = patsy.dmatrices( "SalePrice ~ \ GarageCars + \ np.log1p(BsmtFinSF1) + \ ScreenPorch + \ Condition1_E + \ Condition2_E + \ WoodDeckSF + \ np.log1p(LotArea) + \ Foundation_E + \ MSZoning_E + \ MasVnrType_E + \ HouseStyle_E + \ Fireplaces + \ CentralAir_E + \ BsmtFullBath + \ EnclosedPorch + \ PavedDrive_E + \ ExterQual_E + \ bs(OverallCond, df=7, degree=1) + \ bs(MSSubClass, df=7, degree=1) + \ bs(LotArea, df=2, degree=1) + \ bs(FullBath, df=3, degree=1) + \ bs(HalfBath, df=2, degree=1) 
+ \ bs(BsmtFullBath, df=3, degree=1) + \ bs(TotRmsAbvGrd, df=2, degree=1) + \ bs(LandSlope_E, df=2, degree=1) + \ bs(LotConfig_E, df=2, degree=1) + \ bs(SaleCondition_E, df=3, degree=1) + \ OverallQual + np.square(OverallQual) + \ GrLivArea + np.square(GrLivArea) + \ Q('1stFlrSF') + np.square(Q('1stFlrSF')) + \ Q('2ndFlrSF') + np.square(Q('2ndFlrSF')) + \ TotalBsmtSF + np.square(TotalBsmtSF) + \ KitchenAbvGr + np.square(KitchenAbvGr) + \ YearBuilt + np.square(YearBuilt) + \ Neighborhood_E + np.square(Neighborhood_E) + \ Neighborhood_E:OverallQual + \ MSSubClass:BldgType_E + \ ExterQual_E:OverallQual + \ PoolArea:PoolQC_E + \ Fireplaces:FireplaceQu_E + \ OverallQual:KitchenQual_E + \ GarageQual_E:GarageCond + \ GarageArea:GarageCars + \ Q('1stFlrSF'):TotalBsmtSF + \ TotRmsAbvGrd:GrLivArea", train.to_dict("list"), ) ridge = linear_model.RidgeCV(cv=10) ridge.fit(X, np.log(Y)) Ypred = np.exp(ridge.predict(X)) print(error(Y, Ypred))
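The final patsy formula relies on bs() spline bases and Q() quoting for column names that are not valid Python identifiers. A self-contained toy sketch (the toy frame, random data, and column choices are illustrative only, not part of the original notebook) shows how such a design matrix is built:

import numpy as np
import pandas as pd
import patsy

rng = np.random.default_rng(0)
toy = pd.DataFrame(
    {
        "SalePrice": np.exp(rng.normal(12, 0.4, size=200)),
        "OverallQual": rng.integers(1, 11, size=200),
        "1stFlrSF": rng.normal(1100, 300, size=200),  # name starts with a digit, so it needs Q('...')
    }
)
# bs(...) expands a numeric column into a B-spline basis; Q('1stFlrSF') lets the
# formula reference a column whose name is not a valid identifier.
y, X = patsy.dmatrices(
    "np.log(SalePrice) ~ bs(Q('1stFlrSF'), df=4, degree=1)"
    " + OverallQual + np.square(OverallQual)",
    toy,
)
print(X.design_info.column_names)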
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/116/129116346.ipynb
null
null
[{"Id": 129116346, "ScriptId": 38379539, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11814885, "CreationDate": "05/11/2023 06:06:29", "VersionNumber": 3.0, "Title": "notebook6beac02115", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 330.0, "LinesInsertedFromPrevious": 312.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 18.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
3,879
0
3,879
3,879
129116166
# packages import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import json # load JSON file with open( "../input/google-research-identify-contrails-reduce-global-warming/train_metadata.json", "r", ) as f: dict_train_meta = json.load(f) # look at first entry dict_train_meta[0] # convert to data frame df_train_meta = pd.DataFrame.from_dict(dict_train_meta) df_train_meta.head() df_train_meta.info() features_num = ["row_min", "row_size", "col_min", "col_size", "timestamp"] # basic stats df_train_meta[features_num].describe() # pairwise scatterplots sns.pairplot(df_train_meta[features_num]) plt.show()
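A possible, optional next step is to turn the offset/size columns into explicit extents before plotting. This sketch assumes, without confirmation from the metadata itself, that row_min/row_size and col_min/col_size describe the offset and size of a bounding box in pixel coordinates; the helper name add_bbox_extents is illustrative.

import pandas as pd


def add_bbox_extents(df: pd.DataFrame) -> pd.DataFrame:
    # Assumption: *_min columns are offsets and *_size columns are extents, so max = min + size.
    out = df.copy()
    out["row_max"] = out["row_min"] + out["row_size"]
    out["col_max"] = out["col_min"] + out["col_size"]
    return out


# Hypothetical usage with the frame built above:
# df_train_meta = add_bbox_extents(df_train_meta)
# print(df_train_meta[["row_min", "row_max", "col_min", "col_max"]].describe())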
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/116/129116166.ipynb
null
null
[{"Id": 129116166, "ScriptId": 38383919, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1330628, "CreationDate": "05/11/2023 06:04:42", "VersionNumber": 3.0, "Title": "Contrails - Metadata", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 31.0, "LinesInsertedFromPrevious": 14.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 17.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# packages import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import json # load JSON file with open( "../input/google-research-identify-contrails-reduce-global-warming/train_metadata.json", "r", ) as f: dict_train_meta = json.load(f) # look at first entry dict_train_meta[0] # convert to data frame df_train_meta = pd.DataFrame.from_dict(dict_train_meta) df_train_meta.head() df_train_meta.info() features_num = ["row_min", "row_size", "col_min", "col_size", "timestamp"] # basic stats df_train_meta[features_num].describe() # pairwise scatterplots sns.pairplot(df_train_meta[features_num]) plt.show()
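# A possible follow-up sketch for the metadata explored above (my addition, with explicit assumptions):
# if `timestamp` is a Unix epoch in seconds and `row_min`/`row_size` (likewise the col_* columns)
# describe a crop window, a readable datetime and the window's end coordinates can be derived as below.
# Both interpretations are assumptions, not facts documented in the notebook; df_train_meta is the
# DataFrame built above.
import pandas as pd

df_meta = df_train_meta.copy()
df_meta["datetime"] = pd.to_datetime(df_meta["timestamp"], unit="s")   # assumes Unix seconds
df_meta["row_max"] = df_meta["row_min"] + df_meta["row_size"]          # assumes size = window extent
df_meta["col_max"] = df_meta["col_min"] + df_meta["col_size"]
print(df_meta[["datetime", "row_min", "row_max", "col_min", "col_max"]].head())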
false
0
218
0
218
218
129116217
import numpy as np import pandas as pd import pandas as pd from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from sklearn import metrics from sklearn.metrics import r2_score, mean_squared_error, make_scorer from sklearn.metrics import ( confusion_matrix, precision_score, recall_score, classification_report, accuracy_score, ) from sklearn.model_selection import StratifiedKFold, KFold from sklearn.model_selection import cross_val_score from sklearn.preprocessing import MinMaxScaler import warnings warnings.filterwarnings("ignore") # # 1. Define Metric SMAPE # def smape(y_true, y_pred): # # y_true = y_true + 1 # # y_pred = y_pred + 1 # smap = np.zeros(len(y_true)) # num = np.abs(y_true - y_pred) # dem = ((np.abs(y_true) + np.abs(y_pred)) / 2) # pos_ind = (y_true!=0)|(y_pred!=0) # smap[pos_ind] = num[pos_ind] / dem[pos_ind] # return 100 * np.mean(smap) def smape(y_true, y_pred): y_true = y_true + 1 y_pred = y_pred + 1 smap = np.zeros(len(y_true)) num = np.abs(y_true - y_pred) dem = (np.abs(y_true) + np.abs(y_pred)) / 2 pos_ind = (y_true != 0) | (y_pred != 0) smap[pos_ind] = num[pos_ind] / dem[pos_ind] return 100 * np.mean(smap) # # 2. Load Train and Sample Test Data # ## 2.1 Protein data proteins = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv" ) print("Proteins shape:", proteins.shape) proteins.head() # ## 2.2 Peptides data peptides = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_peptides.csv" ) print("Peptides shape:", peptides.shape) peptides.head() # ## 2.3 Clinical data clinical = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv" ) print("Clinical shape:", clinical.shape) clinical.head() supplemental_clinical_data = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv" ) supplemental_clinical_data = supplemental_clinical_data df_sup = ( pd.concat([clinical, supplemental_clinical_data]) .drop(columns="upd23b_clinical_state_on_medication") .reset_index(drop=True) ) df_sup.head() df_sup df_sup.isna().sum() target_columns_clinical_data = ["updrs_1"] target_columns_clinical_and_supplemental_data = ["updrs_2", "updrs_3", "updrs_4"] target_visit_month_medians_clinical_data = clinical.groupby("visit_month")[ target_columns_clinical_data ].median() target_visit_month_medians_clinical_and_supplemental_data = ( pd.concat((clinical, supplemental_clinical_data), axis=0) .groupby("visit_month")[target_columns_clinical_and_supplemental_data] .median() ) # Drop 5th month visit that is coming from the supplemental clinical data target_visit_month_medians_clinical_and_supplemental_data = ( target_visit_month_medians_clinical_and_supplemental_data.drop(5) ) # Concatenate visit_month medians of targets target_visit_month_medians = pd.concat( ( target_visit_month_medians_clinical_data, target_visit_month_medians_clinical_and_supplemental_data, ), axis=1, ignore_index=False, ) # Replace expanding window max of updrs values with current updrs values target_visit_month_medians = target_visit_month_medians.expanding(min_periods=1).max() target_visit_month_medians = target_visit_month_medians.rename( columns={ "updrs_1": "updrs_1_median", "updrs_2": "updrs_2_median", "updrs_3": "updrs_3_median", "updrs_4": "updrs_4_median", } ) target_visit_month_medians from sklearn.decomposition import PCA def get_pca_df(df, 
df_proteins, df_peptides, comp): df_prot_pept = df_proteins.merge( df_peptides[["visit_id", "UniProt", "Peptide", "PeptideAbundance"]], on=["visit_id", "UniProt"], how="left", ) df_all = pd.merge(df_prot_pept, df, how="left") # df_all = df_all.drop(columns = 'upd23b_clinical_state_on_medication') protein_pivot = ( df_proteins.pivot(index="visit_id", columns="UniProt", values="NPX") .rename_axis(columns=None) .reset_index() ) peptide_pivot = ( df_peptides.pivot( index="visit_id", columns="Peptide", values="PeptideAbundance" ) .rename_axis(columns=None) .reset_index() ) df_prot_pept = protein_pivot.merge(peptide_pivot, on=["visit_id"], how="left") df_all = pd.merge(df, df_prot_pept, how="left") # df_all = df_all.drop(columns = 'upd23b_clinical_state_on_medication') col_df_all = df_prot_pept.drop("visit_id", axis=1).columns.to_list() df_all[col_df_all] = df_all[col_df_all].fillna(0) pca = PCA(n_components=1) principalComponents = pca.fit_transform(df_all[col_df_all]) print(f"Explained variance for {comp} componed is {pca.explained_variance_ratio_}") df_pca = pd.DataFrame(principalComponents, columns=["PCA_1"]) drop_columns = df_all[col_df_all].columns.to_list() df_ = pd.concat([df_all.drop(drop_columns, axis=1), df_pca], axis=1) return df_ df_pca_train = get_pca_df(df_sup, proteins, peptides, 1) df_pca_train = pd.merge( df_pca_train, target_visit_month_medians.reset_index(), on="visit_month", how="left" ) col_median = target_visit_month_medians.columns df_pca_train[col_median] = df_pca_train[col_median].fillna( df_pca_train[col_median].median() ) df_pca_train.head() df_pca_train.shape df_sup.shape df_pca_train.isna().sum() # # 5. Training df_pca_train.head() df_0_1 = ( df_pca_train[["visit_id", "updrs_1", "patient_id", "visit_month", "PCA_1"]] .dropna(subset=["updrs_1"]) .reset_index(drop=True) ) df_0_2 = ( df_pca_train[["visit_id", "updrs_2", "patient_id", "visit_month", "PCA_1"]] .dropna(subset=["updrs_2"]) .reset_index(drop=True) ) df_0_3 = ( df_pca_train[["visit_id", "updrs_3", "patient_id", "visit_month", "PCA_1"]] .dropna(subset=["updrs_3"]) .reset_index(drop=True) ) df_0_3 = df_0_3[df_0_3["updrs_3"] != 0].reset_index(drop=True) df_0_4 = ( df_pca_train[["visit_id", "updrs_4", "patient_id", "visit_month", "PCA_1"]] .dropna(subset=["updrs_4"]) .reset_index(drop=True) ) df_lst = [df_0_1, df_0_2, df_0_3, df_0_4] df_lst[2].isna().sum() df_lst[2] from sklearn.model_selection import GroupKFold def objective(trial): """ Objective function to be passed to Optuna It also does Cross-Validation """ param = { "random_state": 42, "n_estimators": trial.suggest_int("n_estimators", 10, 300, step=10), "criterion": trial.suggest_categorical("criterion", choices=["squared_error"]), "max_depth": trial.suggest_int("max_depth", 1, 10), "max_features": trial.suggest_categorical( "max_features", choices=["auto", "sqrt"] ), "max_samples": trial.suggest_float("max_samples", 0.2, 0.95, step=0.05), "min_samples_leaf": trial.suggest_int("min_samples_leaf", 1, 20, step=3), } model_params = param rf.set_params(**model_params) # kf = KFold(n_splits = 10, shuffle = True, random_state = 42) gkf = GroupKFold(n_splits=18) groups = train_0["patient_id"] X = train_0.drop([f"updrs_{i+1}"], axis=1) y = train_0[f"updrs_{i+1}"] for fold, (train_index, test_index) in enumerate(gkf.split(X, y, groups)): train_0.loc[test_index, "kfold"] = fold # fold_data = pd.merge(df_visit_id,train_0,on='patient_id') smape_scores = [] train_smape_scores = [] metrics_new = [] for fold in range(18): df_train = train_0[train_0.kfold != fold] 
df_valid = train_0[train_0.kfold == fold] feature_cols = train_0.drop(["kfold", f"updrs_{i+1}"], axis=1).columns.tolist() target_cols = [f"updrs_{i+1}"] X_train = df_train[feature_cols] y_train = df_train[target_cols].values.ravel() X_val = df_valid[feature_cols] y_val = df_valid[target_cols].values.ravel() rf.fit(X_train, y_train) train_smape_score = smape(y_train, np.round(rf.predict(X_train))) smape_score = smape(y_val, np.round(rf.predict(X_val))) trainVal_auc = train_smape_score - smape_score smape_scores.append(smape_score) train_smape_scores.append(train_smape_score) # metrics_to_improve = smape_score/abs(trainVal_auc) metrics_new.append(abs(trainVal_auc)) # print(f'train score {(train_smape_score)} , val score {(smape_score)}, difference for score {(abs(trainVal_auc))}') metrics_Mean_Diff = abs(np.mean(train_smape_scores) - np.mean(smape_scores)) print( f"train score {np.mean(train_smape_scores)} , val score {np.mean(smape_scores)}, difference for mean score {(metrics_Mean_Diff)}" ) return metrics_Mean_Diff # import xgboost as xgb ### Optimization ### import optuna from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor model = {} # mms_store = {} # mms = MinMaxScaler() smape_avg = [] for i in range(3): print("--------------------------------------------------------") print("#" * 17, "Model", i + 1, "#" * 17) train_0 = df_lst[i] # scale_col = ['NPX_mean','NPX_median', 'Abe_mean', 'Abe_median'] # train_0[scale_col] = mms.fit_transform(train_0[scale_col]) rf = RandomForestRegressor(random_state=42, criterion=smape) # rf = xgb.XGBRegressor(random_state= 42) # lr = LinearRegression() # Bayesian Optimization study = optuna.create_study( direction="minimize", sampler=optuna.samplers.TPESampler(seed=42) ) study.optimize(objective, n_trials=10) # Best hyperparameters best_params = study.best_params rf.set_params(**best_params) X = train_0.drop(columns=[f"updrs_{i+1}", "kfold"]) # X = train_0[['visit_month']+ scale_col] y = train_0[f"updrs_{i+1}"] rf.fit(X, y) smape_avg.append(smape(y, np.round(rf.predict(X)))) print(f"Train smape: {(smape(y, np.round(rf.predict(X))))}") print("#" * 45) model[i] = rf # mms_store[i] = mms print(f"SMAPE AVEGARE FOR ALL IS {np.mean(smape_avg)}") # # 6. 
Inference import amp_pd_peptide_310 amp_pd_peptide_310.make_env.func_dict["__called__"] = False env = amp_pd_peptide_310.make_env() iter_test = env.iter_test() def map_test(x): updrs = x.split("_")[2] + "_" + x.split("_")[3] month = int(x.split("_plus_")[1].split("_")[0]) visit_id = x.split("_")[0] + "_" + x.split("_")[1] # set all predictions 0 where updrs equals 'updrs_4' if updrs == "updrs_3": # rating = updrs_3_pred[month] rating = df[df.visit_id == visit_id]["pred2"].values[0] elif updrs == "updrs_4": rating = 0 elif updrs == "updrs_1": rating = df[df.visit_id == visit_id]["pred0"].values[0] else: rating = df[df.visit_id == visit_id]["pred1"].values[0] return rating # NEW for test, test_peptides, test_proteins, sample_submission in iter_test: df = test[["visit_id", "patient_id", "visit_month"]].drop_duplicates("visit_id") pred_0 = get_pca_df(df, test_proteins, test_peptides, 1) pred_0 = pred_0[["visit_id", "patient_id", "visit_month", "PCA_1"]] pred_0 = model[0].predict(pred_0) df["pred0"] = np.round(pred_0) pred_1 = get_pca_df(df, test_proteins, test_peptides, 1) pred_1 = pred_1[["visit_id", "patient_id", "visit_month", "PCA_1"]] pred_1 = model[1].predict(pred_1) df["pred1"] = np.round(pred_1) pred_2 = get_pca_df(df, test_proteins, test_peptides, 1) pred_2 = pred_2[["visit_id", "patient_id", "visit_month", "PCA_1"]] pred_2 = model[2].predict(pred_2) df["pred2"] = np.round(pred_2) sample_submission["rating"] = sample_submission["prediction_id"].apply(map_test) print(sample_submission) env.predict(sample_submission)
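# A worked numeric check (my sketch, equivalent in effect to the smape() defined above): after the
# +1 shift the denominator can never be zero, so the pos_ind masking becomes a no-op and the metric
# reduces to the plain symmetric ratio on shifted values. The example values are made up.
import numpy as np

def smape_plus_one(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=float) + 1.0
    y_pred = np.asarray(y_pred, dtype=float) + 1.0
    num = np.abs(y_true - y_pred)
    dem = (np.abs(y_true) + np.abs(y_pred)) / 2.0
    return 100.0 * np.mean(num / dem)

# (0, 0) contributes 0% after shifting to (1, 1); (10, 12) shifts to (11, 13) -> 2 / 12 = 16.67%
print(smape_plus_one([0, 10], [0, 12]))   # (0 + 16.67) / 2 = 8.33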
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/116/129116217.ipynb
null
null
[{"Id": 129116217, "ScriptId": 38383026, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14390726, "CreationDate": "05/11/2023 06:05:12", "VersionNumber": 2.0, "Title": "PCA with Optuna (56.7)", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 315.0, "LinesInsertedFromPrevious": 21.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 294.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np import pandas as pd import pandas as pd from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from sklearn import metrics from sklearn.metrics import r2_score, mean_squared_error, make_scorer from sklearn.metrics import ( confusion_matrix, precision_score, recall_score, classification_report, accuracy_score, ) from sklearn.model_selection import StratifiedKFold, KFold from sklearn.model_selection import cross_val_score from sklearn.preprocessing import MinMaxScaler import warnings warnings.filterwarnings("ignore") # # 1. Define Metric SMAPE # def smape(y_true, y_pred): # # y_true = y_true + 1 # # y_pred = y_pred + 1 # smap = np.zeros(len(y_true)) # num = np.abs(y_true - y_pred) # dem = ((np.abs(y_true) + np.abs(y_pred)) / 2) # pos_ind = (y_true!=0)|(y_pred!=0) # smap[pos_ind] = num[pos_ind] / dem[pos_ind] # return 100 * np.mean(smap) def smape(y_true, y_pred): y_true = y_true + 1 y_pred = y_pred + 1 smap = np.zeros(len(y_true)) num = np.abs(y_true - y_pred) dem = (np.abs(y_true) + np.abs(y_pred)) / 2 pos_ind = (y_true != 0) | (y_pred != 0) smap[pos_ind] = num[pos_ind] / dem[pos_ind] return 100 * np.mean(smap) # # 2. Load Train and Sample Test Data # ## 2.1 Protein data proteins = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv" ) print("Proteins shape:", proteins.shape) proteins.head() # ## 2.2 Peptides data peptides = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_peptides.csv" ) print("Peptides shape:", peptides.shape) peptides.head() # ## 2.3 Clinical data clinical = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv" ) print("Clinical shape:", clinical.shape) clinical.head() supplemental_clinical_data = pd.read_csv( "/kaggle/input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv" ) supplemental_clinical_data = supplemental_clinical_data df_sup = ( pd.concat([clinical, supplemental_clinical_data]) .drop(columns="upd23b_clinical_state_on_medication") .reset_index(drop=True) ) df_sup.head() df_sup df_sup.isna().sum() target_columns_clinical_data = ["updrs_1"] target_columns_clinical_and_supplemental_data = ["updrs_2", "updrs_3", "updrs_4"] target_visit_month_medians_clinical_data = clinical.groupby("visit_month")[ target_columns_clinical_data ].median() target_visit_month_medians_clinical_and_supplemental_data = ( pd.concat((clinical, supplemental_clinical_data), axis=0) .groupby("visit_month")[target_columns_clinical_and_supplemental_data] .median() ) # Drop 5th month visit that is coming from the supplemental clinical data target_visit_month_medians_clinical_and_supplemental_data = ( target_visit_month_medians_clinical_and_supplemental_data.drop(5) ) # Concatenate visit_month medians of targets target_visit_month_medians = pd.concat( ( target_visit_month_medians_clinical_data, target_visit_month_medians_clinical_and_supplemental_data, ), axis=1, ignore_index=False, ) # Replace expanding window max of updrs values with current updrs values target_visit_month_medians = target_visit_month_medians.expanding(min_periods=1).max() target_visit_month_medians = target_visit_month_medians.rename( columns={ "updrs_1": "updrs_1_median", "updrs_2": "updrs_2_median", "updrs_3": "updrs_3_median", "updrs_4": "updrs_4_median", } ) target_visit_month_medians from sklearn.decomposition import PCA def get_pca_df(df, 
df_proteins, df_peptides, comp): df_prot_pept = df_proteins.merge( df_peptides[["visit_id", "UniProt", "Peptide", "PeptideAbundance"]], on=["visit_id", "UniProt"], how="left", ) df_all = pd.merge(df_prot_pept, df, how="left") # df_all = df_all.drop(columns = 'upd23b_clinical_state_on_medication') protein_pivot = ( df_proteins.pivot(index="visit_id", columns="UniProt", values="NPX") .rename_axis(columns=None) .reset_index() ) peptide_pivot = ( df_peptides.pivot( index="visit_id", columns="Peptide", values="PeptideAbundance" ) .rename_axis(columns=None) .reset_index() ) df_prot_pept = protein_pivot.merge(peptide_pivot, on=["visit_id"], how="left") df_all = pd.merge(df, df_prot_pept, how="left") # df_all = df_all.drop(columns = 'upd23b_clinical_state_on_medication') col_df_all = df_prot_pept.drop("visit_id", axis=1).columns.to_list() df_all[col_df_all] = df_all[col_df_all].fillna(0) pca = PCA(n_components=1) principalComponents = pca.fit_transform(df_all[col_df_all]) print(f"Explained variance for {comp} componed is {pca.explained_variance_ratio_}") df_pca = pd.DataFrame(principalComponents, columns=["PCA_1"]) drop_columns = df_all[col_df_all].columns.to_list() df_ = pd.concat([df_all.drop(drop_columns, axis=1), df_pca], axis=1) return df_ df_pca_train = get_pca_df(df_sup, proteins, peptides, 1) df_pca_train = pd.merge( df_pca_train, target_visit_month_medians.reset_index(), on="visit_month", how="left" ) col_median = target_visit_month_medians.columns df_pca_train[col_median] = df_pca_train[col_median].fillna( df_pca_train[col_median].median() ) df_pca_train.head() df_pca_train.shape df_sup.shape df_pca_train.isna().sum() # # 5. Training df_pca_train.head() df_0_1 = ( df_pca_train[["visit_id", "updrs_1", "patient_id", "visit_month", "PCA_1"]] .dropna(subset=["updrs_1"]) .reset_index(drop=True) ) df_0_2 = ( df_pca_train[["visit_id", "updrs_2", "patient_id", "visit_month", "PCA_1"]] .dropna(subset=["updrs_2"]) .reset_index(drop=True) ) df_0_3 = ( df_pca_train[["visit_id", "updrs_3", "patient_id", "visit_month", "PCA_1"]] .dropna(subset=["updrs_3"]) .reset_index(drop=True) ) df_0_3 = df_0_3[df_0_3["updrs_3"] != 0].reset_index(drop=True) df_0_4 = ( df_pca_train[["visit_id", "updrs_4", "patient_id", "visit_month", "PCA_1"]] .dropna(subset=["updrs_4"]) .reset_index(drop=True) ) df_lst = [df_0_1, df_0_2, df_0_3, df_0_4] df_lst[2].isna().sum() df_lst[2] from sklearn.model_selection import GroupKFold def objective(trial): """ Objective function to be passed to Optuna It also does Cross-Validation """ param = { "random_state": 42, "n_estimators": trial.suggest_int("n_estimators", 10, 300, step=10), "criterion": trial.suggest_categorical("criterion", choices=["squared_error"]), "max_depth": trial.suggest_int("max_depth", 1, 10), "max_features": trial.suggest_categorical( "max_features", choices=["auto", "sqrt"] ), "max_samples": trial.suggest_float("max_samples", 0.2, 0.95, step=0.05), "min_samples_leaf": trial.suggest_int("min_samples_leaf", 1, 20, step=3), } model_params = param rf.set_params(**model_params) # kf = KFold(n_splits = 10, shuffle = True, random_state = 42) gkf = GroupKFold(n_splits=18) groups = train_0["patient_id"] X = train_0.drop([f"updrs_{i+1}"], axis=1) y = train_0[f"updrs_{i+1}"] for fold, (train_index, test_index) in enumerate(gkf.split(X, y, groups)): train_0.loc[test_index, "kfold"] = fold # fold_data = pd.merge(df_visit_id,train_0,on='patient_id') smape_scores = [] train_smape_scores = [] metrics_new = [] for fold in range(18): df_train = train_0[train_0.kfold != fold] 
df_valid = train_0[train_0.kfold == fold] feature_cols = train_0.drop(["kfold", f"updrs_{i+1}"], axis=1).columns.tolist() target_cols = [f"updrs_{i+1}"] X_train = df_train[feature_cols] y_train = df_train[target_cols].values.ravel() X_val = df_valid[feature_cols] y_val = df_valid[target_cols].values.ravel() rf.fit(X_train, y_train) train_smape_score = smape(y_train, np.round(rf.predict(X_train))) smape_score = smape(y_val, np.round(rf.predict(X_val))) trainVal_auc = train_smape_score - smape_score smape_scores.append(smape_score) train_smape_scores.append(train_smape_score) # metrics_to_improve = smape_score/abs(trainVal_auc) metrics_new.append(abs(trainVal_auc)) # print(f'train score {(train_smape_score)} , val score {(smape_score)}, difference for score {(abs(trainVal_auc))}') metrics_Mean_Diff = abs(np.mean(train_smape_scores) - np.mean(smape_scores)) print( f"train score {np.mean(train_smape_scores)} , val score {np.mean(smape_scores)}, difference for mean score {(metrics_Mean_Diff)}" ) return metrics_Mean_Diff # import xgboost as xgb ### Optimization ### import optuna from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor model = {} # mms_store = {} # mms = MinMaxScaler() smape_avg = [] for i in range(3): print("--------------------------------------------------------") print("#" * 17, "Model", i + 1, "#" * 17) train_0 = df_lst[i] # scale_col = ['NPX_mean','NPX_median', 'Abe_mean', 'Abe_median'] # train_0[scale_col] = mms.fit_transform(train_0[scale_col]) rf = RandomForestRegressor(random_state=42, criterion=smape) # rf = xgb.XGBRegressor(random_state= 42) # lr = LinearRegression() # Bayesian Optimization study = optuna.create_study( direction="minimize", sampler=optuna.samplers.TPESampler(seed=42) ) study.optimize(objective, n_trials=10) # Best hyperparameters best_params = study.best_params rf.set_params(**best_params) X = train_0.drop(columns=[f"updrs_{i+1}", "kfold"]) # X = train_0[['visit_month']+ scale_col] y = train_0[f"updrs_{i+1}"] rf.fit(X, y) smape_avg.append(smape(y, np.round(rf.predict(X)))) print(f"Train smape: {(smape(y, np.round(rf.predict(X))))}") print("#" * 45) model[i] = rf # mms_store[i] = mms print(f"SMAPE AVEGARE FOR ALL IS {np.mean(smape_avg)}") # # 6. 
Inference import amp_pd_peptide_310 amp_pd_peptide_310.make_env.func_dict["__called__"] = False env = amp_pd_peptide_310.make_env() iter_test = env.iter_test() def map_test(x): updrs = x.split("_")[2] + "_" + x.split("_")[3] month = int(x.split("_plus_")[1].split("_")[0]) visit_id = x.split("_")[0] + "_" + x.split("_")[1] # set all predictions 0 where updrs equals 'updrs_4' if updrs == "updrs_3": # rating = updrs_3_pred[month] rating = df[df.visit_id == visit_id]["pred2"].values[0] elif updrs == "updrs_4": rating = 0 elif updrs == "updrs_1": rating = df[df.visit_id == visit_id]["pred0"].values[0] else: rating = df[df.visit_id == visit_id]["pred1"].values[0] return rating # NEW for test, test_peptides, test_proteins, sample_submission in iter_test: df = test[["visit_id", "patient_id", "visit_month"]].drop_duplicates("visit_id") pred_0 = get_pca_df(df, test_proteins, test_peptides, 1) pred_0 = pred_0[["visit_id", "patient_id", "visit_month", "PCA_1"]] pred_0 = model[0].predict(pred_0) df["pred0"] = np.round(pred_0) pred_1 = get_pca_df(df, test_proteins, test_peptides, 1) pred_1 = pred_1[["visit_id", "patient_id", "visit_month", "PCA_1"]] pred_1 = model[1].predict(pred_1) df["pred1"] = np.round(pred_1) pred_2 = get_pca_df(df, test_proteins, test_peptides, 1) pred_2 = pred_2[["visit_id", "patient_id", "visit_month", "PCA_1"]] pred_2 = model[2].predict(pred_2) df["pred2"] = np.round(pred_2) sample_submission["rating"] = sample_submission["prediction_id"].apply(map_test) print(sample_submission) env.predict(sample_submission)
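# An illustration (mine) of how map_test() above decomposes a prediction_id. The concrete id is
# hypothetical; its shape is inferred only from the split() calls in map_test:
# "<patient>_<visit_month>_updrs_<k>_plus_<m>_months".
pid = "55_36_updrs_2_plus_6_months"                      # hypothetical prediction_id

updrs = pid.split("_")[2] + "_" + pid.split("_")[3]      # 'updrs_2'  -> which target column
month = int(pid.split("_plus_")[1].split("_")[0])        # 6          -> months ahead of the visit
visit_id = pid.split("_")[0] + "_" + pid.split("_")[1]   # '55_36'    -> row to look up in df

print(updrs, month, visit_id)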
false
0
4,351
0
4,351
4,351
129116999
<jupyter_start><jupyter_text>AirIndia Monthly Passenger Traffic This dataset is about airline operations and performance. The data is quantitative and numerical in nature. It can be analyzed and used to derive insights on the airline's performance, capacity utilization, revenue generation, and efficiency. This type of data is commonly used in the airline industry for performance analysis, benchmarking, and decision-making purposes. **1. Month:** This column refers to the month in which the data was recorded. **2. DEPARTURES:** The number of flights that departed during the month in question. **3. HOURS:** Hours flown by the airline during the month in question. This can be used to track the airline's utilization of its fleet. **4. KILOMETRE(TH):** Kilometers flown by the airline during the month, measured in thousands. This can be used to track the airline's overall operational performance. **5. PASSENGERS CARRIED:** Number of passengers carried by the airline during a given month. **6. PASSENGER KMS.PERFORMED(TH):** Passenger kilometers performed by the airline during the month, measured in thousands. This can be used to track the airline's revenue performance. **7. AVAILABLE SEAT KILOMETRE(TH):** Seat kilometers available on the airline's flights during the month, measured in thousands. This can be used to track the airline's capacity utilization. **8. PAX.LOAD FACTOR (IN %)**: Percentage of available seats that were actually occupied by passengers during the month in question. This is a key metric for airlines, as it indicates how effectively they are filling their planes. Kaggle dataset identifier: airindia-monthly-passenger-traffic <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df_dom = pd.read_csv( "/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (Domestic).csv" ) df_dom.head() df_dom.shape df_dom.info() df_dom.isna().sum() df_dom.fillna(0) num_cols = df_dom.select_dtypes(include=np.number).columns.tolist() print("Numerical columns are: ", num_cols) import matplotlib.pyplot as plt import seaborn as sns df_dom_monthly = df_dom.groupby("Month").mean() df_dom_monthly for col in df_dom_monthly: df_dom_monthly[col] = round(df_dom_monthly[col], 2) for col in df_dom_monthly: fig, ax = plt.subplots(1, 1, figsize=(15, 10)) plt.title(f"Month vs {col}") sns.barplot(x=df_dom_monthly.index, y=df_dom_monthly[col], palette="mako") for index in range(len(df_dom_monthly.index)): ax.text( index, df_dom_monthly.iloc[index][col], df_dom_monthly.iloc[index][col], ha="center", ) diff = max(df_dom_monthly[col]) - min(df_dom_monthly[col]) diff = round(diff, 2) ax.text( 1.32, 0.5, f"Diff betn best and worst performing month: {diff}", fontsize=20, horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, ) # * **There seems to be some drawdown in April that has caused poor performance overall** # * **September is seen as the best 
performer in all terms** df_dom_year = df_dom.groupby("FY").sum() df_dom_year df_dom_year[" PAX. LOAD FACTOR#\n(IN %)"] = df_dom.groupby("FY")[ " PAX. LOAD FACTOR#\n(IN %)" ].mean() df_dom_year[" PAX. LOAD FACTOR#\n(IN %)"] = round( df_dom_year[" PAX. LOAD FACTOR#\n(IN %)"], 2 ) df_dom_year for col in df_dom_year: fig, ax = plt.subplots(1, 1, figsize=(15, 10)) plt.title(f"Month vs {col}") sns.barplot(x=df_dom_year.index, y=df_dom_year[col], palette="mako") for index in range(len(df_dom_year.index)): ax.text( index, df_dom_year.iloc[index][col], df_dom_year.iloc[index][col], ha="center", ) drop = ( (df_dom_year.iloc[6][col] - df_dom_year.iloc[7][col]) / df_dom_year.iloc[6][col] * 100 ) # FY20 and FY21 drop = round(drop, 2) plt.text( 1.3, 0.2, f"Percentage decrease due to COVID19: {drop}", fontsize=20, fontweight="bold", bbox=dict(facecolor="red", alpha=0.5), horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, ) recov = ( (df_dom_year.iloc[9][col] - df_dom_year.iloc[7][col]) / df_dom_year.iloc[7][col] * 100 ) # FY23 and #FY21 recov = round(recov, 2) plt.text( 1.3, 0.4, f"Percentage increase since COVID19: {recov}", fontsize=20, fontweight="bold", bbox=dict(facecolor="green", alpha=0.5), horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, ) pre = ( (df_dom_year.iloc[6][col] - df_dom_year.iloc[9][col]) / df_dom_year.iloc[6][col] * 100 ) pre = round(pre, 2) plt.text( 1.4, 0.6, f"Percentage increase to attain pre-COVID19 levels: {pre}", fontsize=20, fontweight="bold", bbox=dict(facecolor="blue", alpha=0.5), horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, )
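# A small sketch (mine) motivating why the yearly aggregation above sums the traffic columns but
# averages the load factor: per the column descriptions, PAX. LOAD FACTOR is the share of available
# seat-kilometres actually used, so summing it across months is meaningless. With simplified,
# hypothetical column names, a volume-weighted annual figure can also be computed directly from
# passenger-km and available-seat-km.
import pandas as pd

toy = pd.DataFrame({            # hypothetical monthly figures, thousands of km
    "pax_km":  [400.0, 900.0],
    "seat_km": [800.0, 1000.0],
})
toy["load_factor_pct"] = 100.0 * toy["pax_km"] / toy["seat_km"]    # 50% and 90%

naive_mean = toy["load_factor_pct"].mean()                         # 70.0 (unweighted mean)
weighted = 100.0 * toy["pax_km"].sum() / toy["seat_km"].sum()      # 72.2 (volume weighted)
print(naive_mean, round(weighted, 1))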
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/116/129116999.ipynb
airindia-monthly-passenger-traffic
nishantbhardwaj07
[{"Id": 129116999, "ScriptId": 38378133, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8456841, "CreationDate": "05/11/2023 06:13:34", "VersionNumber": 1.0, "Title": "Air India Domestic Analysis", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 99.0, "LinesInsertedFromPrevious": 99.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 184887662, "KernelVersionId": 129116999, "SourceDatasetVersionId": 5467490}]
[{"Id": 5467490, "DatasetId": 3156392, "DatasourceVersionId": 5541690, "CreatorUserId": 9217065, "LicenseName": "Other (specified in description)", "CreationDate": "04/20/2023 11:26:18", "VersionNumber": 7.0, "Title": "AirIndia Monthly Passenger Traffic", "Slug": "airindia-monthly-passenger-traffic", "Subtitle": "Monthly Traffic & Operating Data of AirIndia for past 10 Financial Years FY14-23", "Description": "This dataset is about airline operations and performance. The data is quantitative and numerical in nature. It can be analyzed and used to derive insights on the airline's performance, capacity utilization, revenue generation, and efficiency. This type of data is commonly used in the airline industry for performance analysis, benchmarking, and decision-making purposes.\n\n**1. Month:** This column refers to the month in which the data was recorded.\n\n**2. DEPARTURES:** The number of flights that departed during the month in question.\n\n**3. HOURS:** Hours flown by the airline during the month in question. This can be used to track the airline's utilization of its fleet.\n\n**4. KILOMETRE(TH):** Kilometers flown by the airline during the month, measured in thousands. This can be used to track the airline's overall operational performance.\n\n**5. PASSENGERS CARRIED:** Number of passengers carried by the airline during a given month.\n\n**6. PASSENGER KMS.PERFORMED(TH):** Passenger kilometers performed by the airline during the month, measured in thousands. This can be used to track the airline's revenue performance.\n\n**7. AVAILABLE SEAT KILOMETRE(TH):** Seat kilometers available on the airline's flights during the month, measured in thousands. This can be used to track the airline's capacity utilization.\n\n**8. PAX.LOAD FACTOR (IN %)**: Percentage of available seats that were actually occupied by passengers during the month in question. This is a key metric for airlines, as it indicates how effectively they are filling their planes.", "VersionNotes": "Data Update 2023-04-20", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3156392, "CreatorUserId": 9217065, "OwnerUserId": 9217065.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5467490.0, "CurrentDatasourceVersionId": 5541690.0, "ForumId": 3220424, "Type": 2, "CreationDate": "04/20/2023 04:40:15", "LastActivityDate": "04/20/2023", "TotalViews": 8752, "TotalDownloads": 1694, "TotalVotes": 38, "TotalKernels": 8}]
[{"Id": 9217065, "UserName": "nishantbhardwaj07", "DisplayName": "NishantBhardwaj07", "RegisterDate": "12/20/2021", "PerformanceTier": 1}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df_dom = pd.read_csv( "/kaggle/input/airindia-monthly-passenger-traffic/AirIndia (Domestic).csv" ) df_dom.head() df_dom.shape df_dom.info() df_dom.isna().sum() df_dom.fillna(0) num_cols = df_dom.select_dtypes(include=np.number).columns.tolist() print("Numerical columns are: ", num_cols) import matplotlib.pyplot as plt import seaborn as sns df_dom_monthly = df_dom.groupby("Month").mean() df_dom_monthly for col in df_dom_monthly: df_dom_monthly[col] = round(df_dom_monthly[col], 2) for col in df_dom_monthly: fig, ax = plt.subplots(1, 1, figsize=(15, 10)) plt.title(f"Month vs {col}") sns.barplot(x=df_dom_monthly.index, y=df_dom_monthly[col], palette="mako") for index in range(len(df_dom_monthly.index)): ax.text( index, df_dom_monthly.iloc[index][col], df_dom_monthly.iloc[index][col], ha="center", ) diff = max(df_dom_monthly[col]) - min(df_dom_monthly[col]) diff = round(diff, 2) ax.text( 1.32, 0.5, f"Diff betn best and worst performing month: {diff}", fontsize=20, horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, ) # * **There seems to be some drawdown in April that has caused poor performance overall** # * **September is seen as the best performer in all terms** df_dom_year = df_dom.groupby("FY").sum() df_dom_year df_dom_year[" PAX. LOAD FACTOR#\n(IN %)"] = df_dom.groupby("FY")[ " PAX. LOAD FACTOR#\n(IN %)" ].mean() df_dom_year[" PAX. LOAD FACTOR#\n(IN %)"] = round( df_dom_year[" PAX. LOAD FACTOR#\n(IN %)"], 2 ) df_dom_year for col in df_dom_year: fig, ax = plt.subplots(1, 1, figsize=(15, 10)) plt.title(f"Month vs {col}") sns.barplot(x=df_dom_year.index, y=df_dom_year[col], palette="mako") for index in range(len(df_dom_year.index)): ax.text( index, df_dom_year.iloc[index][col], df_dom_year.iloc[index][col], ha="center", ) drop = ( (df_dom_year.iloc[6][col] - df_dom_year.iloc[7][col]) / df_dom_year.iloc[6][col] * 100 ) # FY20 and FY21 drop = round(drop, 2) plt.text( 1.3, 0.2, f"Percentage decrease due to COVID19: {drop}", fontsize=20, fontweight="bold", bbox=dict(facecolor="red", alpha=0.5), horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, ) recov = ( (df_dom_year.iloc[9][col] - df_dom_year.iloc[7][col]) / df_dom_year.iloc[7][col] * 100 ) # FY23 and #FY21 recov = round(recov, 2) plt.text( 1.3, 0.4, f"Percentage increase since COVID19: {recov}", fontsize=20, fontweight="bold", bbox=dict(facecolor="green", alpha=0.5), horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, ) pre = ( (df_dom_year.iloc[6][col] - df_dom_year.iloc[9][col]) / df_dom_year.iloc[6][col] * 100 ) pre = round(pre, 2) plt.text( 1.4, 0.6, f"Percentage increase to attain pre-COVID19 levels: {pre}", fontsize=20, fontweight="bold", bbox=dict(facecolor="blue", alpha=0.5), horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, )
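# A worked arithmetic note (mine) on the COVID annotations above, with made-up numbers
# FY20 = 100, FY21 = 40, FY23 = 85: the "decrease due to COVID19" and "increase since COVID19"
# formulas behave as labelled, but the third quantity, (FY20 - FY23) / FY20, is the remaining
# shortfall relative to FY20; the increase still needed from FY23 to reach FY20 would divide by
# FY23 instead.
fy20, fy21, fy23 = 100.0, 40.0, 85.0       # hypothetical values for one metric

drop = (fy20 - fy21) / fy20 * 100          # 60.0  -> decrease from FY20 to FY21
recov = (fy23 - fy21) / fy21 * 100         # 112.5 -> increase from FY21 to FY23
shortfall = (fy20 - fy23) / fy20 * 100     # 15.0  -> gap remaining, as a share of FY20
needed = (fy20 - fy23) / fy23 * 100        # ~17.6 -> growth required from FY23 to regain FY20
print(drop, recov, shortfall, round(needed, 1))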
false
1
1,344
1
1,763
1,344
129082880
# Короткий зміст. # У цій роботі необхідно побудувати нейронну мережу з одним прихованим шаром (як у роботі № 3) і застосувати її для датасета про котиків (з роботи № 2). Можна і потрібно використовувати код із цих робіт. # Більш детальний опис роботи. # 1. Архітектура мережі: вхідний шар (розмір визначається розмірами картинок) -> прихований шар (розмір підібрати експериментально) + ФА_1 -> вихідний шар (розмір визначається розв'язуваною задачею) + ФА_2, де # ФА_1 - функція активації прихованого шару; розглянути два варіанти - tanh і ReLU; # ФА_2 - функція активації вихідного шару (визначається завданням). # 2. Експерименти та графіки: # 2.1 Як змінюється cost за ітераціями з фіксованим learning_rate (значення підбираються самостійно) + графік; # 2.2 Як змінюється cost за ітераціями з різними learning_rate (3-4 різні значення різного порядку) + графік; # 2.3 Як впливає тип ініціалізації параметрів (нулями або як у роботі № 3) на результати; # 2.4 Як впливає розмір прихованого шару результати; # 2.5 Як впливає функція активації прихованого шару результати. # # Package imports import numpy as np import matplotlib.pyplot as plt # from testCases_v2 import * import sklearn import sklearn.datasets import sklearn.linear_model import h5py import scipy from PIL import Image from scipy import ndimage def sigmoid(x): """ Compute the sigmoid of x Arguments: x -- A scalar or numpy array of any size. Return: s -- sigmoid(x) """ s = 1 / (1 + np.exp(-x)) return s # Loading the dataset def load_dataset(): train_dataset = h5py.File("../input/cat-images-dataset/train_catvnoncat.h5", "r") train_set_x_orig = np.array(train_dataset["train_set_x"][:]) train_set_y_orig = np.array(train_dataset["train_set_y"][:]) test_dataset = h5py.File("../input/cat-images-dataset/test_catvnoncat.h5", "r") test_set_x_orig = np.array(test_dataset["test_set_x"][:]) test_set_y_orig = np.array(test_dataset["test_set_y"][:]) classes = np.array(test_dataset["list_classes"][:]) train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0])) test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0])) return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes def get_accuracy(model_results): predictions = predict(model_results["parameters"], test_set_x) print("Accuracy: {:.0%}".format(1 - np.mean(np.abs(predictions - test_set_y)))) train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset() # Example of a picture index = 36 plt.imshow(train_set_x_orig[index]) print( "y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture." 
) ### START CODE HERE ### (≈ 3 lines of code) m_train = train_set_x_orig.shape[0] m_test = test_set_x_orig.shape[0] num_px = train_set_x_orig.shape[1] ### END CODE HERE ### print("Number of training examples: m_train = " + str(m_train)) print("Number of testing examples: m_test = " + str(m_test)) print("Height/Width of each image: num_px = " + str(num_px)) print("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)") print("train_set_x shape: " + str(train_set_x_orig.shape)) print("train_set_y shape: " + str(train_set_y.shape)) print("test_set_x shape: " + str(test_set_x_orig.shape)) print("test_set_y shape: " + str(test_set_y.shape)) # Reshape the training and test examples ### START CODE HERE ### (≈ 2 lines of code) train_set_x_flatten = train_set_x_orig.reshape(m_train, -1).T test_set_x_flatten = test_set_x_orig.reshape(m_test, -1).T ### END CODE HERE ### print("train_set_x_flatten shape: " + str(train_set_x_flatten.shape)) print("train_set_y shape: " + str(train_set_y.shape)) print("test_set_x_flatten shape: " + str(test_set_x_flatten.shape)) print("test_set_y shape: " + str(test_set_y.shape)) print("sanity check after reshaping: " + str(train_set_x_flatten[0:5, 0])) train_set_x = train_set_x_flatten / 255.0 test_set_x = test_set_x_flatten / 255.0 print(train_set_y) # ## Simple logistic regression # Train the logistic regression classifier. clf = sklearn.linear_model.LogisticRegressionCV(max_iter=1000) clf.fit(train_set_x.T, np.ravel(train_set_y).T) predicted_y = clf.predict(test_set_x.T) print( "test accuracy: {} %".format(100 - np.mean(np.abs(predicted_y - test_set_y)) * 100) ) # ## Neural network # ## Defining the neural network structure def layer_sizes(X, Y, hidden=4): """ Arguments: X -- input dataset of shape (input size, number of examples) Y -- labels of shape (output size, number of examples) Returns: n_x -- the size of the input layer n_h -- the size of the hidden layer n_y -- the size of the output layer """ ### START CODE HERE ### (≈ 3 lines of code) n_x = X.shape[0] # size of input layer n_h = hidden n_y = Y.shape[0] # size of output layer ### END CODE HERE ### return (n_x, n_h, n_y) # X_assess, Y_assess = layer_sizes_test_case() (n_x, n_h, n_y) = layer_sizes(train_set_x, train_set_y) print("The size of the input layer is: n_x = " + str(n_x)) print("The size of the hidden layer is: n_h = " + str(n_h)) print("The size of the output layer is: n_y = " + str(n_y)) # ## Initialize the model's parameters def initialize_parameters(n_x, n_h, n_y, put_random_data=True): """ Argument: n_x -- size of the input layer n_h -- size of the hidden layer n_y -- size of the output layer Returns: params -- python dictionary containing your parameters: W1 -- weight matrix of shape (n_h, n_x) b1 -- bias vector of shape (n_h, 1) W2 -- weight matrix of shape (n_y, n_h) b2 -- bias vector of shape (n_y, 1) """ np.random.seed( 2 ) # we set up a seed so that your output matches ours although the initialization is random. 
### START CODE HERE ### (≈ 4 lines of code) W1 = np.random.randn(n_h, n_x) * 0.01 if put_random_data else np.zeros((n_h, n_x)) b1 = np.zeros((n_h, 1)) W2 = np.random.randn(n_y, n_h) * 0.01 if put_random_data else np.zeros((n_y, n_h)) b2 = np.zeros((n_y, 1)) ### END CODE HERE ### assert W1.shape == (n_h, n_x) assert b1.shape == (n_h, 1) assert W2.shape == (n_y, n_h) assert b2.shape == (n_y, 1) parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2} return parameters parameters = initialize_parameters(n_x, n_h, n_y) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) # ## The Loop # GRADED FUNCTION: forward_propagation def forward_propagation(X, parameters, activator="tanh"): """ Argument: X -- input data of size (n_x, m) parameters -- python dictionary containing your parameters (output of initialization function) Returns: A2 -- The sigmoid output of the second activation cache -- a dictionary containing "Z1", "A1", "Z2" and "A2" """ W1 = parameters["W1"] b1 = parameters["b1"] W2 = parameters["W2"] b2 = parameters["b2"] activators = {"tanh": np.tanh, "relu": lambda x: np.maximum(x, 0.1)} curr_activator = activators[activator] Z1 = np.dot(W1, X) + b1 A1 = curr_activator(Z1) Z2 = np.dot(W2, A1) + b2 A2 = sigmoid(Z2) assert A2.shape == (1, X.shape[1]) cache = {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2} return A2, cache A2, cache = forward_propagation(train_set_x, parameters, activator="relu") # Note: we use the mean here just to make sure that your output matches ours. print( np.mean(cache["Z1"]), np.mean(cache["A1"]), np.mean(cache["Z2"]), np.mean(cache["A2"]), ) def compute_cost(A2, Y, parameters): """ Computes the cross-entropy cost given in equation (13) Arguments: A2 -- The sigmoid output of the second activation, of shape (1, number of examples) Y -- "true" labels vector of shape (1, number of examples) parameters -- python dictionary containing your parameters W1, b1, W2 and b2 [Note that the parameters argument is not used in this function, but the auto-grader currently expects this parameter. Future version of this notebook will fix both the notebook and the auto-grader so that `parameters` is not needed. For now, please include `parameters` in the function signature, and also when invoking this function.] Returns: cost -- cross-entropy cost given equation (13) """ m = Y.shape[1] # number of example # Compute the cross-entropy cost ### START CODE HERE ### (≈ 2 lines of code) logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), 1 - Y) cost = -np.sum(logprobs) / m ### END CODE HERE ### cost = float(np.squeeze(cost)) # makes sure cost is the dimension we expect. # E.g., turns [[17]] into 17 assert isinstance(cost, float) return cost print("cost = " + str(compute_cost(A2, train_set_y, parameters))) def backward_propagation(parameters, cache, X, Y): """ Implement the backward propagation using the instructions above. Arguments: parameters -- python dictionary containing our parameters cache -- a dictionary containing "Z1", "A1", "Z2" and "A2". X -- input data of shape (2, number of examples) Y -- "true" labels vector of shape (1, number of examples) Returns: grads -- python dictionary containing your gradients with respect to different parameters """ m = X.shape[1] # First, retrieve W1 and W2 from the dictionary "parameters". 
### START CODE HERE ### (≈ 2 lines of code) W1 = parameters["W1"] W2 = parameters["W2"] ### END CODE HERE ### # Retrieve also A1 and A2 from dictionary "cache". ### START CODE HERE ### (≈ 2 lines of code) A1 = cache["A1"] A2 = cache["A2"] ### END CODE HERE ### # Backward propagation: calculate dW1, db1, dW2, db2. ### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above) dZ2 = A2 - Y dW2 = (1 / m) * np.dot(dZ2, A1.T) db2 = (1 / m) * np.sum(dZ2, axis=1, keepdims=True) dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2)) dW1 = (1 / m) * np.dot(dZ1, X.T) db1 = (1 / m) * np.sum(dZ1, axis=1, keepdims=True) ### END CODE HERE ### grads = {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2} return grads grads = backward_propagation(parameters, cache, train_set_x, train_set_y) print("dW1 = " + str(grads["dW1"])) print("db1 = " + str(grads["db1"])) print("dW2 = " + str(grads["dW2"])) print("db2 = " + str(grads["db2"])) def update_parameters(parameters, grads, learning_rate=1.2): """ Updates parameters using the gradient descent update rule given above Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients Returns: parameters -- python dictionary containing your updated parameters """ # Retrieve each parameter from the dictionary "parameters" ### START CODE HERE ### (≈ 4 lines of code) W1 = parameters["W1"] b1 = parameters["b1"] W2 = parameters["W2"] b2 = parameters["b2"] ### END CODE HERE ### # Retrieve each gradient from the dictionary "grads" ### START CODE HERE ### (≈ 4 lines of code) dW1 = grads["dW1"] db1 = grads["db1"] dW2 = grads["dW2"] db2 = grads["db2"] ## END CODE HERE ### # Update rule for each parameter ### START CODE HERE ### (≈ 4 lines of code) W1 = W1 - learning_rate * dW1 b1 = b1 - learning_rate * db1 W2 = W2 - learning_rate * dW2 b2 = b2 - learning_rate * db2 ### END CODE HERE ### parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2} return parameters parameters = update_parameters(parameters, grads) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) # ## Integrate parts in nn_model() def nn_model( X, Y, n_h=100, num_iterations=10000, print_cost=False, init_with_random_data=True, hidden_activator="tanh", learning_rate=1.2, ): """ Arguments: X -- dataset of shape (2, number of examples) Y -- labels of shape (1, number of examples) n_h -- size of the hidden layer num_iterations -- Number of iterations in gradient descent loop print_cost -- if True, print the cost every 1000 iterations Returns: parameters -- parameters learnt by the model. They can then be used to predict. """ np.random.seed(3) n_x = layer_sizes(X, Y)[0] n_y = layer_sizes(X, Y)[2] # Initialize parameters parameters = initialize_parameters( n_x, n_h, n_y, put_random_data=init_with_random_data ) costs = [] # Loop (gradient descent) for i in range(0, num_iterations): ### START CODE HERE ### (≈ 4 lines of code) # Forward propagation. Inputs: "X, parameters". Outputs: "A2, cache". A2, cache = forward_propagation(X, parameters, activator=hidden_activator) # Cost function. Inputs: "A2, Y, parameters". Outputs: "cost". cost = compute_cost(A2, Y, parameters) # Backpropagation. Inputs: "parameters, cache, X, Y". Outputs: "grads". grads = backward_propagation(parameters, cache, X, Y) # Gradient descent parameter update. Inputs: "parameters, grads". Outputs: "parameters". 
parameters = update_parameters(parameters, grads, learning_rate=learning_rate) costs.append(cost) ### END CODE HERE ### # Print the cost every 1000 iterations if print_cost and i % 1000 == 0: print("Cost after iteration %i: %f" % (i, cost)) result = { "costs": costs, "parameters": parameters, "learning_rate": learning_rate, "num_iterations": num_iterations, } return result result = nn_model(train_set_x, train_set_y, 4, num_iterations=10000, print_cost=True) parameters = result["parameters"] print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) # ## Prediction def predict(parameters, X): """ Using the learned parameters, predicts a class for each example in X Arguments: parameters -- python dictionary containing your parameters X -- input data of size (n_x, m) Returns predictions -- vector of predictions of our model (red: 0 / blue: 1) """ # Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold. ### START CODE HERE ### (≈ 2 lines of code) A2, cache = forward_propagation(X, parameters) predictions = (A2 > 0.5).astype("int") ### END CODE HERE ### return predictions predictions = predict(parameters, test_set_x) print("predictions mean = " + str(np.mean(predictions))) # ## Building a model # Build a model with a n_h-dimensional hidden layer result = nn_model( train_set_x, train_set_y, n_h=50, num_iterations=2000, print_cost=True, init_with_random_data=True, learning_rate=0.01, ) get_accuracy(result) # ## Experiments with values # ### Як змінюється cost за ітераціями з фіксованим learning_rate (значення підбираються самостійно) + графік; learning_rate = 0.01 print("learning rate is: " + str(learning_rate)) curr_model = nn_model( train_set_x, train_set_y, num_iterations=2000, learning_rate=learning_rate, print_cost=False, ) plt.plot(np.squeeze(curr_model["costs"]), label=str(learning_rate)) plt.ylabel("cost") plt.xlabel("iterations (hundreds)") legend = plt.legend(loc="upper center", shadow=True) frame = legend.get_frame() frame.set_facecolor("0.90") plt.show() # ![image.png](attachment:d626d7b6-04a5-4768-9aef-f0e38d4d2428.png) # ### Як змінюється cost за ітераціями з різними learning_rate (3-4 різні значення різного порядку) + графік; learning_rates = [0.01, 0.001, 0.0001] models = {} for i in learning_rates: print("learning rate is: " + str(i)) models[str(i)] = nn_model( train_set_x, train_set_y, num_iterations=2000, learning_rate=i, print_cost=False ) print("\n" + "-------------------------------------------------------" + "\n") for i in learning_rates: plt.plot( np.squeeze(models[str(i)]["costs"]), label=str(models[str(i)]["learning_rate"]) ) plt.ylabel("cost") plt.xlabel("iterations (hundreds)") legend = plt.legend(loc="upper center", shadow=True) frame = legend.get_frame() frame.set_facecolor("0.90") plt.show() # ![image.png](attachment:82809ab8-6c48-41c8-88df-9f4c1314d24b.png) # ### Як впливає тип ініціалізації параметрів (нулями або як у роботі № 3) на результати; init_types = [True, False] for init_type in init_types: print("init_type is: " + str(init_type)) curr_model = nn_model( train_set_x, train_set_y, num_iterations=1000, learning_rate=0.1, print_cost=False, init_with_random_data=init_type, ) get_accuracy(curr_model) print("\n" + "-------------------------------------------------------" + "\n") # init_type is: True - random data # Accuracy: 72% # ------------------------------------------------------- # init_type is: False - zeros # 
Accuracy: 34% # ------------------------------------------------------- # ### Як впливає розмір прихованого шару результати; sizes = [1, 4, 5, 10, 100, 1000] for i in sizes: print("nh size is: " + str(i)) curr_model = nn_model( train_set_x, train_set_y, num_iterations=2000, learning_rate=0.01, print_cost=False, n_h=i, ) get_accuracy(curr_model) print("\n" + "-------------------------------------------------------" + "\n") # nh size is: 1 # Accuracy: 68% # ------------------------------------------------------- # nh size is: 4 # Accuracy: 68% # ------------------------------------------------------- # nh size is: 10 # Accuracy: 72% # ------------------------------------------------------- # nh size is: 100 # Accuracy: 72% # ------------------------------------------------------- # nh size is: 1000 # Accuracy: 72% # ------------------------------------------------------- # ### Як впливає функція активації прихованого шару результати. activators = ["tanh", "relu"] for i in activators: print("activator is: " + str(i)) curr_model = nn_model( train_set_x, train_set_y, num_iterations=500, learning_rate=0.01, print_cost=False, hidden_activator=i, n_h=10, ) get_accuracy(curr_model) print("\n" + "-------------------------------------------------------" + "\n")
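# A small standalone sketch (mine, an assumption about the intended behaviour rather than the
# notebook's code): in forward_propagation above the 'relu' branch is np.maximum(x, 0.1), i.e. a
# floor at 0.1 instead of the standard max(x, 0), and backward_propagation always applies the tanh
# derivative 1 - A1**2 regardless of the chosen hidden activation. The sketch below shows
# activation-consistent derivatives and verifies them with a finite-difference check.
import numpy as np

def tanh(z):
    return np.tanh(z)

def dtanh_from_a(a):            # derivative expressed through the activation a = tanh(z)
    return 1.0 - a ** 2

def relu(z):                    # standard ReLU: max(z, 0)
    return np.maximum(z, 0.0)

def drelu(z):                   # subgradient 0 at the kink is fine in practice
    return (z > 0).astype(float)

z = np.array([-1.5, -0.2, 0.3, 2.0])    # test points away from z = 0
eps = 1e-6
num_dtanh = (tanh(z + eps) - tanh(z - eps)) / (2 * eps)
num_drelu = (relu(z + eps) - relu(z - eps)) / (2 * eps)

print(np.allclose(num_dtanh, dtanh_from_a(tanh(z))))   # True
print(np.allclose(num_drelu, drelu(z)))                # True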
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/082/129082880.ipynb
null
null
[{"Id": 129082880, "ScriptId": 38373228, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11906928, "CreationDate": "05/10/2023 21:31:43", "VersionNumber": 1.0, "Title": "homework 4 v2", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 574.0, "LinesInsertedFromPrevious": 574.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# Короткий зміст. # У цій роботі необхідно побудувати нейронну мережу з одним прихованим шаром (як у роботі № 3) і застосувати її для датасета про котиків (з роботи № 2). Можна і потрібно використовувати код із цих робіт. # Більш детальний опис роботи. # 1. Архітектура мережі: вхідний шар (розмір визначається розмірами картинок) -> прихований шар (розмір підібрати експериментально) + ФА_1 -> вихідний шар (розмір визначається розв'язуваною задачею) + ФА_2, де # ФА_1 - функція активації прихованого шару; розглянути два варіанти - tanh і ReLU; # ФА_2 - функція активації вихідного шару (визначається завданням). # 2. Експерименти та графіки: # 2.1 Як змінюється cost за ітераціями з фіксованим learning_rate (значення підбираються самостійно) + графік; # 2.2 Як змінюється cost за ітераціями з різними learning_rate (3-4 різні значення різного порядку) + графік; # 2.3 Як впливає тип ініціалізації параметрів (нулями або як у роботі № 3) на результати; # 2.4 Як впливає розмір прихованого шару результати; # 2.5 Як впливає функція активації прихованого шару результати. # # Package imports import numpy as np import matplotlib.pyplot as plt # from testCases_v2 import * import sklearn import sklearn.datasets import sklearn.linear_model import h5py import scipy from PIL import Image from scipy import ndimage def sigmoid(x): """ Compute the sigmoid of x Arguments: x -- A scalar or numpy array of any size. Return: s -- sigmoid(x) """ s = 1 / (1 + np.exp(-x)) return s # Loading the dataset def load_dataset(): train_dataset = h5py.File("../input/cat-images-dataset/train_catvnoncat.h5", "r") train_set_x_orig = np.array(train_dataset["train_set_x"][:]) train_set_y_orig = np.array(train_dataset["train_set_y"][:]) test_dataset = h5py.File("../input/cat-images-dataset/test_catvnoncat.h5", "r") test_set_x_orig = np.array(test_dataset["test_set_x"][:]) test_set_y_orig = np.array(test_dataset["test_set_y"][:]) classes = np.array(test_dataset["list_classes"][:]) train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0])) test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0])) return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes def get_accuracy(model_results): predictions = predict(model_results["parameters"], test_set_x) print("Accuracy: {:.0%}".format(1 - np.mean(np.abs(predictions - test_set_y)))) train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset() # Example of a picture index = 36 plt.imshow(train_set_x_orig[index]) print( "y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture." 
) ### START CODE HERE ### (≈ 3 lines of code) m_train = train_set_x_orig.shape[0] m_test = test_set_x_orig.shape[0] num_px = train_set_x_orig.shape[1] ### END CODE HERE ### print("Number of training examples: m_train = " + str(m_train)) print("Number of testing examples: m_test = " + str(m_test)) print("Height/Width of each image: num_px = " + str(num_px)) print("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)") print("train_set_x shape: " + str(train_set_x_orig.shape)) print("train_set_y shape: " + str(train_set_y.shape)) print("test_set_x shape: " + str(test_set_x_orig.shape)) print("test_set_y shape: " + str(test_set_y.shape)) # Reshape the training and test examples ### START CODE HERE ### (≈ 2 lines of code) train_set_x_flatten = train_set_x_orig.reshape(m_train, -1).T test_set_x_flatten = test_set_x_orig.reshape(m_test, -1).T ### END CODE HERE ### print("train_set_x_flatten shape: " + str(train_set_x_flatten.shape)) print("train_set_y shape: " + str(train_set_y.shape)) print("test_set_x_flatten shape: " + str(test_set_x_flatten.shape)) print("test_set_y shape: " + str(test_set_y.shape)) print("sanity check after reshaping: " + str(train_set_x_flatten[0:5, 0])) train_set_x = train_set_x_flatten / 255.0 test_set_x = test_set_x_flatten / 255.0 print(train_set_y) # ## Simple logistic regression # Train the logistic regression classifier. clf = sklearn.linear_model.LogisticRegressionCV(max_iter=1000) clf.fit(train_set_x.T, np.ravel(train_set_y).T) predicted_y = clf.predict(test_set_x.T) print( "test accuracy: {} %".format(100 - np.mean(np.abs(predicted_y - test_set_y)) * 100) ) # ## Neural network # ## Defining the neural network structure def layer_sizes(X, Y, hidden=4): """ Arguments: X -- input dataset of shape (input size, number of examples) Y -- labels of shape (output size, number of examples) Returns: n_x -- the size of the input layer n_h -- the size of the hidden layer n_y -- the size of the output layer """ ### START CODE HERE ### (≈ 3 lines of code) n_x = X.shape[0] # size of input layer n_h = hidden n_y = Y.shape[0] # size of output layer ### END CODE HERE ### return (n_x, n_h, n_y) # X_assess, Y_assess = layer_sizes_test_case() (n_x, n_h, n_y) = layer_sizes(train_set_x, train_set_y) print("The size of the input layer is: n_x = " + str(n_x)) print("The size of the hidden layer is: n_h = " + str(n_h)) print("The size of the output layer is: n_y = " + str(n_y)) # ## Initialize the model's parameters def initialize_parameters(n_x, n_h, n_y, put_random_data=True): """ Argument: n_x -- size of the input layer n_h -- size of the hidden layer n_y -- size of the output layer Returns: params -- python dictionary containing your parameters: W1 -- weight matrix of shape (n_h, n_x) b1 -- bias vector of shape (n_h, 1) W2 -- weight matrix of shape (n_y, n_h) b2 -- bias vector of shape (n_y, 1) """ np.random.seed( 2 ) # we set up a seed so that your output matches ours although the initialization is random. 
### START CODE HERE ### (≈ 4 lines of code) W1 = np.random.randn(n_h, n_x) * 0.01 if put_random_data else np.zeros((n_h, n_x)) b1 = np.zeros((n_h, 1)) W2 = np.random.randn(n_y, n_h) * 0.01 if put_random_data else np.zeros((n_y, n_h)) b2 = np.zeros((n_y, 1)) ### END CODE HERE ### assert W1.shape == (n_h, n_x) assert b1.shape == (n_h, 1) assert W2.shape == (n_y, n_h) assert b2.shape == (n_y, 1) parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2} return parameters parameters = initialize_parameters(n_x, n_h, n_y) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) # ## The Loop # GRADED FUNCTION: forward_propagation def forward_propagation(X, parameters, activator="tanh"): """ Argument: X -- input data of size (n_x, m) parameters -- python dictionary containing your parameters (output of initialization function) Returns: A2 -- The sigmoid output of the second activation cache -- a dictionary containing "Z1", "A1", "Z2" and "A2" """ W1 = parameters["W1"] b1 = parameters["b1"] W2 = parameters["W2"] b2 = parameters["b2"] activators = {"tanh": np.tanh, "relu": lambda x: np.maximum(x, 0.1)} curr_activator = activators[activator] Z1 = np.dot(W1, X) + b1 A1 = curr_activator(Z1) Z2 = np.dot(W2, A1) + b2 A2 = sigmoid(Z2) assert A2.shape == (1, X.shape[1]) cache = {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2} return A2, cache A2, cache = forward_propagation(train_set_x, parameters, activator="relu") # Note: we use the mean here just to make sure that your output matches ours. print( np.mean(cache["Z1"]), np.mean(cache["A1"]), np.mean(cache["Z2"]), np.mean(cache["A2"]), ) def compute_cost(A2, Y, parameters): """ Computes the cross-entropy cost given in equation (13) Arguments: A2 -- The sigmoid output of the second activation, of shape (1, number of examples) Y -- "true" labels vector of shape (1, number of examples) parameters -- python dictionary containing your parameters W1, b1, W2 and b2 [Note that the parameters argument is not used in this function, but the auto-grader currently expects this parameter. Future version of this notebook will fix both the notebook and the auto-grader so that `parameters` is not needed. For now, please include `parameters` in the function signature, and also when invoking this function.] Returns: cost -- cross-entropy cost given equation (13) """ m = Y.shape[1] # number of example # Compute the cross-entropy cost ### START CODE HERE ### (≈ 2 lines of code) logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), 1 - Y) cost = -np.sum(logprobs) / m ### END CODE HERE ### cost = float(np.squeeze(cost)) # makes sure cost is the dimension we expect. # E.g., turns [[17]] into 17 assert isinstance(cost, float) return cost print("cost = " + str(compute_cost(A2, train_set_y, parameters))) def backward_propagation(parameters, cache, X, Y): """ Implement the backward propagation using the instructions above. Arguments: parameters -- python dictionary containing our parameters cache -- a dictionary containing "Z1", "A1", "Z2" and "A2". X -- input data of shape (2, number of examples) Y -- "true" labels vector of shape (1, number of examples) Returns: grads -- python dictionary containing your gradients with respect to different parameters """ m = X.shape[1] # First, retrieve W1 and W2 from the dictionary "parameters". 
### START CODE HERE ### (≈ 2 lines of code) W1 = parameters["W1"] W2 = parameters["W2"] ### END CODE HERE ### # Retrieve also A1 and A2 from dictionary "cache". ### START CODE HERE ### (≈ 2 lines of code) A1 = cache["A1"] A2 = cache["A2"] ### END CODE HERE ### # Backward propagation: calculate dW1, db1, dW2, db2. ### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above) dZ2 = A2 - Y dW2 = (1 / m) * np.dot(dZ2, A1.T) db2 = (1 / m) * np.sum(dZ2, axis=1, keepdims=True) dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2)) dW1 = (1 / m) * np.dot(dZ1, X.T) db1 = (1 / m) * np.sum(dZ1, axis=1, keepdims=True) ### END CODE HERE ### grads = {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2} return grads grads = backward_propagation(parameters, cache, train_set_x, train_set_y) print("dW1 = " + str(grads["dW1"])) print("db1 = " + str(grads["db1"])) print("dW2 = " + str(grads["dW2"])) print("db2 = " + str(grads["db2"])) def update_parameters(parameters, grads, learning_rate=1.2): """ Updates parameters using the gradient descent update rule given above Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients Returns: parameters -- python dictionary containing your updated parameters """ # Retrieve each parameter from the dictionary "parameters" ### START CODE HERE ### (≈ 4 lines of code) W1 = parameters["W1"] b1 = parameters["b1"] W2 = parameters["W2"] b2 = parameters["b2"] ### END CODE HERE ### # Retrieve each gradient from the dictionary "grads" ### START CODE HERE ### (≈ 4 lines of code) dW1 = grads["dW1"] db1 = grads["db1"] dW2 = grads["dW2"] db2 = grads["db2"] ## END CODE HERE ### # Update rule for each parameter ### START CODE HERE ### (≈ 4 lines of code) W1 = W1 - learning_rate * dW1 b1 = b1 - learning_rate * db1 W2 = W2 - learning_rate * dW2 b2 = b2 - learning_rate * db2 ### END CODE HERE ### parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2} return parameters parameters = update_parameters(parameters, grads) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) # ## Integrate parts in nn_model() def nn_model( X, Y, n_h=100, num_iterations=10000, print_cost=False, init_with_random_data=True, hidden_activator="tanh", learning_rate=1.2, ): """ Arguments: X -- dataset of shape (2, number of examples) Y -- labels of shape (1, number of examples) n_h -- size of the hidden layer num_iterations -- Number of iterations in gradient descent loop print_cost -- if True, print the cost every 1000 iterations Returns: parameters -- parameters learnt by the model. They can then be used to predict. """ np.random.seed(3) n_x = layer_sizes(X, Y)[0] n_y = layer_sizes(X, Y)[2] # Initialize parameters parameters = initialize_parameters( n_x, n_h, n_y, put_random_data=init_with_random_data ) costs = [] # Loop (gradient descent) for i in range(0, num_iterations): ### START CODE HERE ### (≈ 4 lines of code) # Forward propagation. Inputs: "X, parameters". Outputs: "A2, cache". A2, cache = forward_propagation(X, parameters, activator=hidden_activator) # Cost function. Inputs: "A2, Y, parameters". Outputs: "cost". cost = compute_cost(A2, Y, parameters) # Backpropagation. Inputs: "parameters, cache, X, Y". Outputs: "grads". grads = backward_propagation(parameters, cache, X, Y) # Gradient descent parameter update. Inputs: "parameters, grads". Outputs: "parameters". 
parameters = update_parameters(parameters, grads, learning_rate=learning_rate) costs.append(cost) ### END CODE HERE ### # Print the cost every 1000 iterations if print_cost and i % 1000 == 0: print("Cost after iteration %i: %f" % (i, cost)) result = { "costs": costs, "parameters": parameters, "learning_rate": learning_rate, "num_iterations": num_iterations, } return result result = nn_model(train_set_x, train_set_y, 4, num_iterations=10000, print_cost=True) parameters = result["parameters"] print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) # ## Prediction def predict(parameters, X): """ Using the learned parameters, predicts a class for each example in X Arguments: parameters -- python dictionary containing your parameters X -- input data of size (n_x, m) Returns predictions -- vector of predictions of our model (red: 0 / blue: 1) """ # Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold. ### START CODE HERE ### (≈ 2 lines of code) A2, cache = forward_propagation(X, parameters) predictions = (A2 > 0.5).astype("int") ### END CODE HERE ### return predictions predictions = predict(parameters, test_set_x) print("predictions mean = " + str(np.mean(predictions))) # ## Building a model # Build a model with a n_h-dimensional hidden layer result = nn_model( train_set_x, train_set_y, n_h=50, num_iterations=2000, print_cost=True, init_with_random_data=True, learning_rate=0.01, ) get_accuracy(result) # ## Experiments with values # ### Як змінюється cost за ітераціями з фіксованим learning_rate (значення підбираються самостійно) + графік; learning_rate = 0.01 print("learning rate is: " + str(learning_rate)) curr_model = nn_model( train_set_x, train_set_y, num_iterations=2000, learning_rate=learning_rate, print_cost=False, ) plt.plot(np.squeeze(curr_model["costs"]), label=str(learning_rate)) plt.ylabel("cost") plt.xlabel("iterations (hundreds)") legend = plt.legend(loc="upper center", shadow=True) frame = legend.get_frame() frame.set_facecolor("0.90") plt.show() # ![image.png](attachment:d626d7b6-04a5-4768-9aef-f0e38d4d2428.png) # ### Як змінюється cost за ітераціями з різними learning_rate (3-4 різні значення різного порядку) + графік; learning_rates = [0.01, 0.001, 0.0001] models = {} for i in learning_rates: print("learning rate is: " + str(i)) models[str(i)] = nn_model( train_set_x, train_set_y, num_iterations=2000, learning_rate=i, print_cost=False ) print("\n" + "-------------------------------------------------------" + "\n") for i in learning_rates: plt.plot( np.squeeze(models[str(i)]["costs"]), label=str(models[str(i)]["learning_rate"]) ) plt.ylabel("cost") plt.xlabel("iterations (hundreds)") legend = plt.legend(loc="upper center", shadow=True) frame = legend.get_frame() frame.set_facecolor("0.90") plt.show() # ![image.png](attachment:82809ab8-6c48-41c8-88df-9f4c1314d24b.png) # ### Як впливає тип ініціалізації параметрів (нулями або як у роботі № 3) на результати; init_types = [True, False] for init_type in init_types: print("init_type is: " + str(init_type)) curr_model = nn_model( train_set_x, train_set_y, num_iterations=1000, learning_rate=0.1, print_cost=False, init_with_random_data=init_type, ) get_accuracy(curr_model) print("\n" + "-------------------------------------------------------" + "\n") # init_type is: True - random data # Accuracy: 72% # ------------------------------------------------------- # init_type is: False - zeros # 
Accuracy: 34%
# -------------------------------------------------------
# ### How the size of the hidden layer affects the results
sizes = [1, 4, 5, 10, 100, 1000]
for i in sizes:
    print("nh size is: " + str(i))
    curr_model = nn_model(
        train_set_x,
        train_set_y,
        num_iterations=2000,
        learning_rate=0.01,
        print_cost=False,
        n_h=i,
    )
    get_accuracy(curr_model)
    print("\n" + "-------------------------------------------------------" + "\n")
# nh size is: 1
# Accuracy: 68%
# -------------------------------------------------------
# nh size is: 4
# Accuracy: 68%
# -------------------------------------------------------
# nh size is: 10
# Accuracy: 72%
# -------------------------------------------------------
# nh size is: 100
# Accuracy: 72%
# -------------------------------------------------------
# nh size is: 1000
# Accuracy: 72%
# -------------------------------------------------------
# ### How the activation function of the hidden layer affects the results
activators = ["tanh", "relu"]
for i in activators:
    print("activator is: " + str(i))
    curr_model = nn_model(
        train_set_x,
        train_set_y,
        num_iterations=500,
        learning_rate=0.01,
        print_cost=False,
        hidden_activator=i,
        n_h=10,
    )
    get_accuracy(curr_model)
    print("\n" + "-------------------------------------------------------" + "\n")
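# Added note (not part of the original assignment): the experiment above compares tanh and
# "relu", but backward_propagation always applies the tanh derivative (1 - A1**2), and the
# "relu" entry in forward_propagation is np.maximum(x, 0.1), which floors activations at 0.1
# rather than at 0. The sketch below shows how the hidden-layer gradient could be matched to
# the chosen activation; the helper names relu, relu_derivative and hidden_grad are
# hypothetical additions, so the accuracies printed above were obtained with the original code.
import numpy as np


def relu(z):
    # standard ReLU; the notebook's "relu" lambda, np.maximum(x, 0.1), floors values at 0.1 instead
    return np.maximum(0, z)


def relu_derivative(z):
    # derivative of ReLU with respect to its input: 1 where z > 0, else 0
    return (z > 0).astype(float)


def hidden_grad(dZ2, W2, cache, activator="tanh"):
    # dZ1 = (W2^T dot dZ2) * g'(Z1); the notebook's backward pass hard-codes the tanh derivative
    dA1 = np.dot(W2.T, dZ2)
    if activator == "tanh":
        return dA1 * (1 - np.power(cache["A1"], 2))
    if activator == "relu":
        return dA1 * relu_derivative(cache["Z1"])
    raise ValueError("unknown activator: " + str(activator))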
false
0
6,585
0
6,585
6,585
129082804
<jupyter_start><jupyter_text>Cancer Data
**570 cancer cells and 30 features to determine whether the cancer cells in our data are benign or malignant**
**Our cancer data contains 2 types of cancers: 1. benign cancer (B) and 2. malignant cancer (M).**
Kaggle dataset identifier: cancer-data
<jupyter_script># # Benign or Malignant Cancer Cells Based on Attributes
# _Group Members:_
# - Eli
# - Jonathan
# - Xiaofan
# [Data Set Here](https://www.kaggle.com/datasets/erdemtaha/cancer-data)
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sklearn as sk
from sklearn.model_selection import train_test_split

# Load the dataset
df = pd.read_csv(r"/kaggle/input/cancer-data/Cancer_Data.csv")
# print(df.head())
print(df.shape)
# # The Dataset
# 569 cancer cells and 33 features to determine whether the cancer cells in our data are benign or malignant. 30 of these 33 features are numerical, making this problem suitable for a neural network. Of these 569 cells, 357 (62.74%) are labeled as Benign and 212 (37.26%) as Malignant.
# Before we begin training our model, we must clean up the data. We start by removing the `id` column as it doesn't provide any useful information. Next, we map the `diagnosis` attribute to 1 for a Malignant tumor and 0 for a Benign tumor. Finally, we remove a stray column titled `Unnamed: 32` as it doesn't provide any useful information.
# We are left with 569 rows and 31 columns. 1 column is the diagnosis (output value), while the other 30 are the statistics about the tumors (input values).
data = df.drop("id", axis=1)
data["diagnosis"] = df["diagnosis"].map({"M": 1, "B": 0})
data = data.drop("Unnamed: 32", axis=1)
print("Shape of Data we are using:", data.shape)
# print(data.head())
# The line below prints the number of Benign and Malignant tumors in our dataset
# print(df['diagnosis'].value_counts())
# # Training and Test samples
# We are using a 70:30 train-test split for our model, which is a rather large testing split, but as you will see later, our model has remarkable accuracy, so a large testing dataset isn't counterproductive.
# Set the random state for our models; this can be changed.
random_state = 42
X, y = data.drop("diagnosis", axis=1), data["diagnosis"]
X_train, X_test, y_train, y_test = sk.model_selection.train_test_split(
    X, y, test_size=0.3, random_state=random_state
)
# Scale the Training and Testing Data
# This scales all of the data to have a standard deviation of 1 and mean 0
# (Standardizes the data so it's a Z-score)
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)  # use the scaler fitted on the training data; do not refit it on the test set
# print(X_train)
# print(y_train)
# # This is a diagram of the layout of the model we are using. It has 30 input neurons and 2 hidden layers with 16 neurons each. The output layer is 1 neuron because it is a binary classifier (Malignant or Benign).
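# Added sketch (not part of the original notebook): bundling the scaler and the classifier in a
# scikit-learn Pipeline re-fits standardization on each training fold only, so held-out data is
# never used to fit the scaler (relevant for the learning_curve call further below, which is run
# on the raw X, y). The MLP settings mirror the classifier defined in the next cell.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import cross_val_score

pipe = Pipeline(
    [
        ("scale", StandardScaler()),  # fitted on the training part of each fold only
        (
            "mlp",
            MLPClassifier(
                hidden_layer_sizes=(16, 16),
                max_iter=1000,
                activation="relu",
                solver="adam",
                batch_size=32,
                random_state=random_state,
            ),
        ),
    ]
)
# 5-fold cross-validated accuracy on the raw features X, y defined above
scores = cross_val_score(pipe, X, y, cv=5, scoring="accuracy")
print("CV accuracy: %.3f +/- %.3f" % (scores.mean(), scores.std()))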
# # Trained on Scaled Data from sklearn.neural_network import MLPClassifier # Create an MLP object with 2 hidden layers, each with 16 neurons mlp = MLPClassifier( hidden_layer_sizes=(16, 16), max_iter=1000, activation="relu", solver="adam", batch_size=32, random_state=random_state, ) # Train the MLP using the input and output datasets model = mlp.fit(X_train, y_train) # Plot the training and validation loss plt.plot(model.loss_curve_, label="Training loss") if mlp.validation_scores_ != None: plt.plot(mlp.validation_scores_) plt.title("Loss Curve") plt.xlabel("Epochs") plt.ylabel("Loss") plt.legend() plt.xlim(0, 400) plt.show() accuracy = mlp.score(X_test, y_test) print(f"Accuracy: {accuracy}") # The loss curve is a graphical representation of the performance of a machine learning model during the training process. In this case, the curve shows that the loss (also known as error) of the model is continuously decreasing over time. This is a good sign, as it indicates that the model is improving in accuracy and becoming more effective at making predictions with each epoch. The line starts at around ~.7, showing it has ~30% accuracy at the start of training and it ends at ~.02 showing it has ~98% accruacy by the end of training. # # Plotting Learning Curve from sklearn.model_selection import learning_curve # do 10 fold cross validation (cv = 10) train_sizes, train_scores, test_scores = learning_curve( model, X, y, train_sizes=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1], cv=10, n_jobs=-1, ) # Compute the mean and standard deviation of the train and test scores train_scores_mean = train_scores.mean(axis=1) train_scores_std = train_scores.std(axis=1) test_scores_mean = test_scores.mean(axis=1) test_scores_std = test_scores.std(axis=1) # Plot the learning curve plt.figure(figsize=(8, 6)) plt.title("Learning Curve (MLP)") plt.xlabel("Training Examples") plt.ylabel("Accuracy") plt.grid() plt.fill_between( train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r", ) plt.fill_between( train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="b", ) plt.plot(train_sizes, train_scores_mean, "o-", color="r", label="Training Score") plt.plot(train_sizes, test_scores_mean, "o-", color="b", label="Cross-Validation Score") plt.legend(loc="best") plt.show() # The plot above is a learning curve tested using 10-fold cross validation. # The x-axis represents the number of training examples used in the model's training, while the y-axis represents the accuracy of the model on both the training set (in red) and the cross-validation set (in blue). The learning curve is a graphical representation of how the model's accuracy improves as the number of training examples increases. The shaded regions around the curves show the standard deviation of the accuracy scores. # As we can see from the plot of the learning curve above, our model starts out overfitting as the gap between the training score and cross validation score is very large. As the amount of training examples increases, the gap between the training and cross-validation scores decreases which suggests that the model is becoming less overfit as more data is added. Towards the end of training, the gap between the training score and cross-validation score is very small, suggesting that our model properly fits the dataset. 
# The accuracy of the model on both the training and cross-validation sets appear to platue at around 300+ training examples which suggests that adding more data beyond this point will not lead to significant improvements in the model's accuracy. # # Confusion Matrix from sklearn.metrics import confusion_matrix # use the model to predict class labels for test data y_pred = mlp.predict(X_test) # generate confusion matrix cm = confusion_matrix(y_test, y_pred) classes = ["Malignant", "Benign"] # plot confusion matrix with labels classes = ["Malignant", "Benign"] fig, ax = plt.subplots() im = ax.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues) ax.figure.colorbar(im, ax=ax, label="Number of Samples") ax.set( xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), xticklabels=classes, yticklabels=classes, title="Confusion matrix", ylabel="True label", xlabel="Predicted label", ) # add text labels to each box thresh = cm.max() / 2 for i in range(cm.shape[0]): for j in range(cm.shape[1]): label = "" if i == 0 and j == 0: label = "TP" elif i == 0 and j == 1: label = "FP" elif i == 1 and j == 0: label = "FN" else: label = "TN" ax.text( j, i, format(label + "\n" + str(cm[i, j]), "s"), ha="center", va="center", color="white" if cm[i, j] > thresh else "black", ) # ax.text(j, i, f"{['TN', 'FP'][j>i]}-{['FN', 'TP'][j>i]}", # ha="center", va="top", fontsize=10, # color="white" if cm[i, j] > thresh else "black") fig.tight_layout() plt.show() # Prints out the FN and FP inputs. # for i in range(len(y_test)): # if y_test.iloc[i] == 1 and y_pred[i] == 0: # print("FN\n", df.iloc[i], '\n') # elif y_test.iloc[i] == 0 and y_pred[i] == 1: # print("FP\n", df.iloc[i], '\n') # The figure above is a confusion matrix # A confusion matrix is a table used to evaluate the performance of a classification model. It compares the predicted and actual classes of a set of test data and displays the results in a matrix format. The matrix consists of four quadrants, where the vertical axis shows the actual class labels, and the horizontal axis shows the predicted class labels. The four quadrants are: # 1) True Positive (TP): The number of correctly predicted positive samples. # 2) False Positive (FP): The number of incorrectly predicted positive samples. # 3) False Negative (FN): The number of incorrectly predicted negative samples. # 4) True Negative (TN): The number of correctly predicted negative samples. # # We can calulate the accuracy of the model with this infomation by dividing the amount of correctly predicted cases to the amount of inccorectly predicted cases # $$ # \frac{T_P+T_N}{T_P+F_P+F_N+T_N} = \frac{107+61}{107+1+2+61} = \frac{168}{171} = .982456 \approx 98.25\% # $$ # weights = model.coefs_[0] # print(model.coefs_[0]) # print(weights.shape) plt.figure(figsize=(12, 8)) plt.imshow(weights, cmap="coolwarm", aspect="auto") plt.colorbar(label="Weight") plt.title("Weights of the first hidden layer") plt.xlabel("Hidden units") x_lab = [i for i in range(1, 17)] plt.yticks(range(len(X.columns)), X.columns) plt.xticks(range(len(x_lab)), x_lab, rotation=0) plt.ylabel("Input features") plt.show()
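# Added illustration for the confusion-matrix section above (not in the original notebook):
# the same matrix also yields precision, recall and F1. Note that with the mapping B -> 0,
# M -> 1, scikit-learn's confusion_matrix puts correctly classified Benign samples at cm[0, 0]
# and correctly classified Malignant samples at cm[1, 1], so index 0 corresponds to Benign.
from sklearn.metrics import classification_report

tn, fp, fn, tp = cm.ravel()  # 2x2 layout is [[TN, FP], [FN, TP]] with Malignant (1) as the positive class
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
print(f"precision={precision:.3f} recall={recall:.3f} f1={f1:.3f}")
print(classification_report(y_test, y_pred, target_names=["Benign", "Malignant"]))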
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/082/129082804.ipynb
cancer-data
erdemtaha
[{"Id": 129082804, "ScriptId": 38372969, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10717068, "CreationDate": "05/10/2023 21:30:34", "VersionNumber": 3.0, "Title": "MLP Classification 97% Accuracy", "EvaluationDate": "05/10/2023", "IsChange": false, "TotalLines": 223.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 223.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184821889, "KernelVersionId": 129082804, "SourceDatasetVersionId": 5212576}]
[{"Id": 5212576, "DatasetId": 3032092, "DatasourceVersionId": 5284991, "CreatorUserId": 2498226, "LicenseName": "Other (specified in description)", "CreationDate": "03/22/2023 07:57:00", "VersionNumber": 1.0, "Title": "Cancer Data", "Slug": "cancer-data", "Subtitle": "Benign and malignant cancer data", "Description": "**570 cancer cells and 30 features to determine whether the cancer cells in our data are benign or malignant**\n\n**Our cancer data contains 2 types of cancers: 1. benign cancer (B) and 2. malignant cancer (M).**", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3032092, "CreatorUserId": 2498226, "OwnerUserId": 2498226.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5212576.0, "CurrentDatasourceVersionId": 5284991.0, "ForumId": 3071494, "Type": 2, "CreationDate": "03/22/2023 07:57:00", "LastActivityDate": "03/22/2023", "TotalViews": 66608, "TotalDownloads": 11493, "TotalVotes": 209, "TotalKernels": 70}]
[{"Id": 2498226, "UserName": "erdemtaha", "DisplayName": "Erdem Taha", "RegisterDate": "11/15/2018", "PerformanceTier": 1}]
false
1
2,915
0
3,005
2,915
129082617
<jupyter_start><jupyter_text>Breast Cancer Wisconsin (Diagnostic) Data Set Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. n the 3-dimensional space is that described in: [K. P. Bennett and O. L. Mangasarian: "Robust Linear Programming Discrimination of Two Linearly Inseparable Sets", Optimization Methods and Software 1, 1992, 23-34]. This database is also available through the UW CS ftp server: ftp ftp.cs.wisc.edu cd math-prog/cpo-dataset/machine-learn/WDBC/ Also can be found on UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29 Attribute Information: 1) ID number 2) Diagnosis (M = malignant, B = benign) 3-32) Ten real-valued features are computed for each cell nucleus: a) radius (mean of distances from center to points on the perimeter) b) texture (standard deviation of gray-scale values) c) perimeter d) area e) smoothness (local variation in radius lengths) f) compactness (perimeter^2 / area - 1.0) g) concavity (severity of concave portions of the contour) h) concave points (number of concave portions of the contour) i) symmetry j) fractal dimension ("coastline approximation" - 1) The mean, standard error and "worst" or largest (mean of the three largest values) of these features were computed for each image, resulting in 30 features. For instance, field 3 is Mean Radius, field 13 is Radius SE, field 23 is Worst Radius. All feature values are recoded with four significant digits. Missing attribute values: none Class distribution: 357 benign, 212 malignant Kaggle dataset identifier: breast-cancer-wisconsin-data <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # data visualization library import matplotlib.pyplot as plt import plotly.express as px import plotly.io as pio # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import time from subprocess import check_output print(check_output(["ls", "../input"]).decode("utf8")) # import warnings library import warnings import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. import warnings warnings.filterwarnings("ignore") # Veri İçeriği # 1. 1. kimlik Numarası # 1. 2. Teşhis (M = malign, B = iyi huylu) # 1. 3. yarıçap (merkezden çevre üzerindeki noktalara olan mesafelerin ortalaması) # 1. 4. doku (gri tonlama değerlerinin standart sapması) # 1. 5. çevre # 1. 6. alan # 1. 7. pürüzsüzlük (yarıçap uzunluklarında yerel değişiklik) # 1. 8. kompaktlık (çevre^2 / alan - 1,0) # 1. 9. içbükeylik (konturun içbükey kısımlarının ciddiyeti) # 1. 10. içbükey noktalar (konturun içbükey kısımlarının sayısı) # 1. 11. simetri # 1. 12. fraktal boyut ("kıyı şeridi yaklaşımı" - 1) # 1. 13. Bu özelliklerin ortalaması, standart hatası ve "en kötü" veya en büyüğü (en büyük üç değerin ortalaması), her görüntü için hesaplandı ve sonuçta 30 özellik elde edildi. Örneğin, alan 3 Ortalama Yarıçap, alan 13 Yarıçap SE, alan 23 En Kötü Yarıçaptır. # 1. 14. Tüm özellik değerleri, dört anlamlı basamakla yeniden kodlanır. # 1. 15. Eksik özellik değerleri: yok # 1. 16. 
Sınıf dağılımı: 357 iyi huylu, 212 kötü huylu # ---------------------------- # Data Content # 1. ID number # 2. Diagnosis (M = malignant, B = benign) # 3. radius (mean of distances from center to points on the perimeter) # 4. texture (standard deviation of gray-scale values) # 5. perimeter # 6. area # 7. smoothness (local variation in radius lengths) # 8. compactness (perimeter^2 / area - 1.0) # 9. concavity (severity of concave portions of the contour) # 10. concave points (number of concave portions of the contour) # 11. symmetry # 12. fractal dimension ("coastline approximation" - 1) # 13. The mean, standard error and "worst" or largest (mean of the three largest values) of these features were computed for each image, resulting in 30 features. For instance, field 3 is Mean Radius, field 13 is Radius SE, field 23 is Worst Radius. # 14. All feature values are recoded with four significant digits. # 15. Missing attribute values: none # 16. Class distribution: 357 benign, 212 malignant data = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv") data.head() # Dikkatimi çeken 4 şey var 1) Sınıflandırma için kullanılamayacak bir id var 2) Tanı bizim sınıf etiketimiz 3) Unnamed: 32 özelliği NaN içeriyor yani ihtiyacımız yok. 4) Diğer özellik adları hakkında hiçbir fikrim yok aslında ihtiyacım yok çünkü makine öğrenimi harika :) # Bu nedenle, bu gereksiz özellikleri bırakın. Ancak bunun bir özellik seçimi olmadığını unutmayın. Bu bir pub'a göz atmak gibi, içeceğimizi henüz seçmiyoruz !!! # feature names as a list col = data.columns # .columns gives columns names in data print(col) # y includes our labels and x includes our features y = data.diagnosis # M or B list = ["Unnamed: 32", "id", "diagnosis"] x = data.drop(list, axis=1) x.head() fig = px.histogram(y, x="diagnosis", color="diagnosis", width=700, height=500) fig.show() # Tamam, şimdi özelliklerimiz var ama ne anlama geliyorlar ya da aslında bu özellikler hakkında ne kadar bilmemiz gerekiyor? varyans, standart sapma, örnek sayısı (count) veya max min değerleri. Bu tür bilgiler, verilerde neler olup bittiğini anlamaya yardımcı olur. Örneğin, aklıma field_mean özelliğinin max değeri 2500 ve smoothness_mean özelliklerinin max 0.16340 olduğu sorusu geldi. Bu nedenle görselleştirme, özellik seçimi, özellik çıkarma veya sınıflandırmadan önce standartlaştırmaya veya normalleştirmeye ihtiyacımız var mı? Cevap evet ve hayır şaşırtıcı değil. # Neyse adım adım gidelim ve görselleştirme ile başlayalım. x.describe() # görselleştirme # Verileri görselleştirmek için, sizi bilgilendirmek ve arazilerin çeşitliliği için diğer çekirdeklerde kullanılmayan seaborn grafiklerini kullanacağız. Gerçek hayatta kullandığım şeyler çoğunlukla keman planı ve sürü planıdır. Unutmayın, özellik seçmiyoruz, bar kapısındaki içecek listesine bakmak gibi verileri öğrenmeye çalışıyoruz. # Keman ve sürü grafiğinden önce normalleştirme veya standardizasyona ihtiyacımız var. Çünkü özelliklerin değerleri arasındaki farklar arsa üzerinde gözlemlenemeyecek kadar yüksektir. Özellikleri 3 grupta çiziyorum ve her grupta daha iyi gözlemlemek için 10 özellik var. # first ten features data_dia = y data = x data_n_2 = (data - data.mean()) / (data.std()) # standardization data = pd.concat([y, data_n_2.iloc[:, 0:10]], axis=1) data = pd.melt(data, id_vars="diagnosis", var_name="features", value_name="value") fig = px.violin( data, y="value", x="features", color="diagnosis", box=True, points="all" ) fig.show() # Yukarıdaki grafiği birlikte yorumlayalım. 
Örneğin, texture_mean özelliğinde Malign ve Benign'in ortancası ayrılmış gibi görünüyor, bu nedenle sınıflandırma için iyi olabilir. Ancak fractal_dimension_mean özelliğinde Malign ve Benign'ın ortancası ayrılmış gibi görünmediğinden sınıflandırma için iyi bilgi vermez. # Second ten features data = pd.concat([y, data_n_2.iloc[:, 10:20]], axis=1) data = pd.melt(data, id_vars="diagnosis", var_name="features", value_name="value") fig = px.violin( data, y="value", x="features", color="diagnosis", box=True, points="all" ) fig.show() # third ten features data = pd.concat([y, data_n_2.iloc[:, 20:31]], axis=1) data = pd.melt(data, id_vars="diagnosis", var_name="features", value_name="value") fig = px.violin( data, y="value", x="features", color="diagnosis", box=True, points="all" ) fig.show() # Yukarıdaki arsa hakkında bir şey daha yorumlayalım, concavity_worst ve concave point_worst değişkeni benzer görünüyor ama birbirleriyle ilişkili olup olmadıklarına nasıl karar verebiliriz. (Her zaman doğru değil ama temel olarak özellikler birbiriyle ilişkiliyse bunlardan birini bırakabiliriz) # İki özelliği daha derinlemesine karşılaştırmak için ortak çizimi kullanalım. Buna aşağıdaki ortak arsada bakın, gerçekten ilişkilidir. Pearsonr değeri korelasyon değeridir ve 1 en yüksek değerdir. Dolayısıyla 0.86 korelasyonlu olduklarını söylemek için yeterli görünmektedir. Unutmayın, özellikleri henüz seçmiyoruz, sadece onlar hakkında fikir sahibi olmaya çalışıyoruz. sns.set(style="white") df = x.loc[:, ["radius_worst", "perimeter_worst", "area_worst"]] g = sns.PairGrid(df, diag_sharey=False) g.map_lower(sns.kdeplot, cmap="Blues_d") g.map_upper(plt.scatter) g.map_diag(sns.kdeplot, lw=3) sns.set(style="whitegrid", palette="muted") data_dia = y data = x data_n_2 = (data - data.mean()) / (data.std()) # standardization data = pd.concat([y, data_n_2.iloc[:, 0:10]], axis=1) data = pd.melt(data, id_vars="diagnosis", var_name="features", value_name="value") plt.figure(figsize=(10, 10)) tic = time.time() sns.swarmplot(x="features", y="value", hue="diagnosis", data=data) plt.xticks(rotation=90) data = pd.concat([y, data_n_2.iloc[:, 10:20]], axis=1) data = pd.melt(data, id_vars="diagnosis", var_name="features", value_name="value") plt.figure(figsize=(10, 10)) sns.swarmplot(x="features", y="value", hue="diagnosis", data=data) plt.xticks(rotation=90) data = pd.concat([y, data_n_2.iloc[:, 20:31]], axis=1) data = pd.melt(data, id_vars="diagnosis", var_name="features", value_name="value") plt.figure(figsize=(10, 10)) sns.swarmplot(x="features", y="value", hue="diagnosis", data=data) toc = time.time() plt.xticks(rotation=90) print("swarm plot time: ", toc - tic, " s") # Harika görünüyorlar. Ve varyansı daha net görebilirsiniz. Size bir soru sorayım, bu üç parselde hangi özellik sınıflandırma açısından daha net görünüyor. Bana göre son sürü arsasında area_worst kötü huylu ve iyi huylu gibi görünüyor, tamamen değil, çoğunlukla ayrılıyor. Ancak sürü arsa 2'deki pürüzsüzlük_se, kötü huylu ve iyi huylu gibi görünüyor, bu nedenle bu özelliği kullanırken sınıflandırmak zor. # Ya özellikler arasındaki tüm korelasyonu gözlemlemek istiyorsak? Evet haklısın. Cevap, eski ama güçlü çizim yöntemi olan ısı haritasıdır. 
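# Added sketch: the pairwise Pearson correlations discussed above can be read directly from
# pandas instead of a joint plot; x is the feature DataFrame defined earlier, and the exact
# values depend on the data.
pairs = [
    ("concavity_worst", "concave points_worst"),
    ("radius_worst", "perimeter_worst"),
    ("radius_worst", "area_worst"),
]
for a, b in pairs:
    print("corr({}, {}) = {:.2f}".format(a, b, x[a].corr(x[b])))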
def dummies(train_df: pd.DataFrame, columns): from sklearn import preprocessing le = preprocessing.LabelEncoder() train_df[columns] = le.fit_transform(train_df[columns]) train_df = pd.get_dummies(train_df, columns=[columns]) return train_df dataa = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv") dataa = dummies(dataa, "diagnosis") dataa.head() dataa["diagnosis"] = dataa["diagnosis_0"] list = ["Unnamed: 32", "id", "diagnosis_1", "diagnosis_0"] dataa = dataa.drop(list, axis=1) # correlation map f, ax = plt.subplots(figsize=(18, 18)) sns.heatmap(dataa.corr(), annot=True, linewidths=0.5, fmt=".1f", ax=ax) # correlation map f, ax = plt.subplots(figsize=(18, 18)) sns.heatmap(x.corr(), annot=True, linewidths=0.5, fmt=".1f", ax=ax) import statsmodels.api as sm def p_values(df, pred_df, row, col, liste: list): """ return X_l new train_dataframe for predict""" global X_l X = np.append(arr=np.ones((row, col)).astype(int), values=df, axis=1) X_l = df.iloc[:, liste].values X_l = pd.DataFrame(np.array(X_l, dtype=float)) model = sm.OLS(pred_df, X_l).fit() return model.summary(), X_l x dataa1 = dataa.drop(labels="diagnosis", axis=1) dataa_s = pd.DataFrame(dataa["diagnosis"]) pvalues = p_values(dataa1, dataa_s, 569, 30, range(0, 30)) pvalues # Özellik Seçimi ve Rastgele Orman Sınıflandırması # Bugün amacımız yeni kokteyller denemek. Mesela sonunda bir bardayız ve farklı tatlar içmek istiyoruz. Bu nedenle içeceklerin içeriklerini karşılaştırmamız gerekir. Bunlardan biri limon içeriyorsa onu içtikten sonra limon içeren diğer içecekleri elimine etmek gerekiyor ki çok farklı tatlar deneyimleyebilelim. # Bu bölümde korelasyonlu özellik seçimi, tek değişkenli özellik seçimi, özyinelemeli özellik eleme (RFE), çapraz doğrulama ile özyinelemeli özellik eleme (RFECV) ve ağaç tabanlı özellik seçimi gibi farklı yöntemlerle öznitelik seçeceğiz. Modelimizi eğitmek ve tahmin etmek için rastgele orman sınıflandırması kullanacağız. # 1) Korelasyon ve rastgele orman sınıflandırması ile özellik seçimi # Haritada görüldüğü gibi ısı rakamı yarıçap_ortalama, çevre_ortalama ve alan_ortalama birbiriyle ilişkilidir, bu nedenle sadece alan_ortalama kullanacağız. Alan_mean'i nasıl bir özellik olarak kullanacağımı sorarsanız, aslında doğru bir cevap yok, sadece sürü grafiklerine bakıyorum ve alan_mean benim için net görünüyor ama denemeden diğer ilişkili özellikler arasında tam ayrım yapamayız. Öyleyse diğer ilişkili özellikleri bulalım ve rastgele orman sınıflandırıcı ile doğruluk görelim. # Kompaktlık_ortalama, içbükeylik_ortalama ve içbükeylik_ortalama birbiriyle ilişkilidir. Bu nedenle sadece içbükeylik_ortalama'yı seçiyorum. Bunların dışında radius_se, perimeter_se ve field_se birbiriyle ilişkilidir ve ben sadece field_se kullanıyorum. yarıçap_en kötü, çevre_en kötü ve alan_en kötü birbiriyle ilişkilidir, bu yüzden ben en kötü alan kullanıyorum. Kompaktlık_en kötü, içbükey_en kötü ve içbükey noktalar_en kötü bu yüzden içbükey_en kötü olanı kullanıyorum. Compactness_se, concavity_se ve concave points_se bu yüzden concavity_se kullanıyorum. texture_mean ve texture_worst birbiriyle ilişkilidir ve ben texture_mean kullanıyorum. field_worst ve area_mean ilişkilidir, ben field_mean kullanıyorum. 
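# Added sketch (an alternative to the hand-picked drop list in the next cell, not the kernel's
# original approach): drop one feature from every pair whose absolute correlation exceeds a
# threshold; the 0.9 threshold is an assumption for illustration.
import numpy as np

corr_abs = x.corr().abs()
# keep only the upper triangle so that each correlated pair is considered once
upper = corr_abs.where(np.triu(np.ones(corr_abs.shape, dtype=bool), k=1))
to_drop = [col for col in upper.columns if (upper[col] > 0.9).any()]
print("candidate columns to drop:", to_drop)
x_auto = x.drop(columns=to_drop)
print("remaining feature count:", x_auto.shape[1])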
drop_list1 = [ "perimeter_mean", "radius_mean", "compactness_mean", "concave points_mean", "radius_se", "perimeter_se", "radius_worst", "perimeter_worst", "compactness_worst", "concave points_worst", "compactness_se", "concave points_se", "texture_worst", "area_worst", ] x_1 = x.drop(drop_list1, axis=1) # do not modify x, we will use it later x_1.head() # Düşürme korelasyonlu özelliklerden sonra, aşağıdaki korelasyon matrisinde de görülebileceği gibi, artık korelasyonlu özellik kalmamıştır. Aslında 0.9 korelasyon değeri olduğunu biliyorum ve görüyorsunuz ama onu düşürmezsek ne olacağını birlikte görelim. # correlation map f, ax = plt.subplots(figsize=(14, 14)) sns.heatmap(x_1.corr(), annot=True, linewidths=0.5, fmt=".1f", ax=ax) # Peki özelliklerimizi seçiyoruz ama doğru mu seçmişiz? Rastgele ormanı kullanalım ve seçilen özelliklere göre doğruluğu bulalım. from sklearn.preprocessing import StandardScaler def scaler(train_df: pd.DataFrame): sc = StandardScaler() train_df = sc.fit_transform(train_df) return train_df from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import f1_score, confusion_matrix from sklearn.metrics import accuracy_score x_1 = scaler(x_1) # split data train 70 % and test 30 % x_train, x_test, y_train, y_test = train_test_split( x_1, y, test_size=0.3, random_state=42 ) # random forest classifier with n_estimators=10 (default) clf_rf = RandomForestClassifier(random_state=43) clr_rf = clf_rf.fit(x_train, y_train) ac = accuracy_score(y_test, clf_rf.predict(x_test)) print("Accuracy is: ", ac) cm = confusion_matrix(y_test, clf_rf.predict(x_test)) sns.heatmap(cm, annot=True, fmt="d") # Doğruluk yaklaşık %95'tir ve karışıklık matrisinden de görülebileceği gibi çok az yanlış tahminde bulunuruz. Şimdi daha iyi sonuçlar bulmak için diğer özellik seçim yöntemlerini görelim. # 2) Tek değişkenli özellik seçimi ve rastgele orman sınıflandırması # Tek değişkenli özellik seçiminde, k en yüksek puanlama özelliği dışındaki tüm özellikleri kaldıran SelectKBest'i kullanacağız. http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SelectKBest.html#sklearn.feature_selection.SelectKBest # Bu yöntemde kaç tane özellik kullanacağımızı seçmemiz gerekiyor. Örneğin, k (özellik sayısı) 5 mi, 10 mu, 15 mi olacak? Cevap sadece deniyor veya sezgisel olarak. Tüm kombinasyonları denemiyorum ama sadece k = 5'i seçiyorum ve en iyi 5 özelliği buluyorum. from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 # find best scored 5 features select_feature = SelectKBest(chi2, k=5).fit(x_train, y_train) print("Score list:", select_feature.scores_) print("Feature list:", x_train.columns) # Sınıflandırılacak en iyi 5 özellik, rea_mean, area_se, texture_mean, concavity_worst and concavity_mean. Öyleyse, yalnızca bu en iyi puan alan 5 özelliği kullanırsak ne olacağını görelim. x_train_2 = select_feature.transform(x_train) x_test_2 = select_feature.transform(x_test) # random forest classifier with n_estimators=10 (default) clf_rf_2 = RandomForestClassifier() clr_rf_2 = clf_rf_2.fit(x_train_2, y_train) ac_2 = accuracy_score(y_test, clf_rf_2.predict(x_test_2)) print("Accuracy is: ", ac_2) cm_2 = confusion_matrix(y_test, clf_rf_2.predict(x_test_2)) sns.heatmap(cm_2, annot=True, fmt="d") # Doğruluk yaklaşık %96'dır ve karışıklık matrisinden de görülebileceği gibi çok az yanlış tahminde bulunuruz. 
Şimdiye kadar yaptığımız şey, özellikleri korelasyon matrisine ve selectkBest yöntemine göre seçmekti. SelectkBest yönteminde 5 özellik kullanmamıza rağmen doğrulukları benzer görünmektedir. Şimdi daha iyi sonuçlar bulmak için diğer özellik seçim yöntemlerini görelim. # 3) Rastgele orman ile özyinelemeli özellik eleme (RFE) # http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFE.html Temel olarak, sınıflandırma yöntemlerinden birini kullanır (bizim örneğimizde rastgele orman), her bir özelliğe ağırlık atayın. Mutlak ağırlıkları en küçük olanların mevcut set özelliklerinden budanır. Bu prosedür, budanmış sette istenen sayıda özellik elde edilene kadar yinelemeli olarak tekrarlanır. # Önceki yöntemde olduğu gibi, 5 özellik kullanacağız. Ancak hangi 5 özelliği kullanacağız? Bunları RFE yöntemi ile seçeceğiz. from sklearn.feature_selection import RFE # Create the RFE object and rank each pixel clf_rf_3 = RandomForestClassifier() rfe = RFE(estimator=clf_rf_3, n_features_to_select=5, step=1) rfe = rfe.fit(x_train, y_train) ac = accuracy_score(y_test, rfe.predict(x_test)) print("Accuracy is: ", ac) print("Chosen best 5 feature by rfe:", x_train.columns[rfe.support_]) # rfe tarafından seçilen en iyi 5 özellik, texture_mean, field_mean, concavity_mean, field_se, concavity_worst. Önceki (selectkBest) yöntemine tamamen benzerler. Bu nedenle doğruluğu tekrar hesaplamamıza gerek yok. Kısaca rfe ve selectkBest yöntemleri ile iyi bir özellik seçimi yaptığımızı söyleyebiliriz. Ancak gördüğünüz gibi bir sorun var tamam ben en iyi 5 özelliğini iki farklı yöntemle buluyoruz ve bu özellikler aynı ama neden 5. Belki en iyi 2 veya en iyi 15 özelliğini kullanırsak daha iyi doğruluk elde ederiz. Bu nedenle, rfecv yöntemiyle kaç tane özellik kullanmamız gerektiğine bakalım. # 4) Çapraz doğrulama ve rastgele orman sınıflandırması ile özyinelemeli özellik eleme # http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html Şimdi sadece en iyi özellikleri değil, aynı zamanda en iyi doğruluk için kaç tane özelliğe ihtiyacımız olduğunu da bulacağız. from sklearn.feature_selection import RFECV # The "accuracy" scoring is proportional to the number of correct classifications clf_rf_4 = RandomForestClassifier() rfecv = RFECV( estimator=clf_rf_4, step=1, cv=5, scoring="accuracy" ) # 5-fold cross-validation rfecv = rfecv.fit(x_train, y_train) print("Optimal number of features :", rfecv.n_features_) print("Best features :", x_train.columns[rfecv.support_]) # Son olarak, en iyi sınıflandırma için texture_mean, field_mean, concavity_mean, texture_se, field_se, concavity_se,chemistry_se, smoothness_worst, concavity_worst,chemistry_worst ve fractal_dimension_worst olan en iyi 11 özelliği bulduk. Arsa ile en iyi doğruluğa bakalım. 
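# Added sketch of the "accuracy versus number of features" plot suggested above. Depending on
# the scikit-learn version, the per-step cross-validation scores are exposed either as
# rfecv.cv_results_["mean_test_score"] (1.0 and later) or as the older rfecv.grid_scores_.
import matplotlib.pyplot as plt

cv_res = getattr(rfecv, "cv_results_", None)
mean_scores = cv_res["mean_test_score"] if cv_res is not None else rfecv.grid_scores_
plt.figure(figsize=(8, 5))
plt.plot(range(1, len(mean_scores) + 1), mean_scores, marker="o")
plt.xlabel("Number of selected features")
plt.ylabel("Cross-validation accuracy")
plt.title("RFECV: accuracy vs. number of features")
plt.show()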
def prob_his(train_df, variable_x, variable_y):
    fig = px.histogram(
        train_df,
        x=variable_x,
        y=variable_y,
        color=variable_x,
        histfunc="avg",
        marginal="box",
        barmode="overlay",
        title=variable_x,
        width=700,
        height=500,
    )
    fig.update_layout(bargap=0.1, barmode="stack")
    fig.show()


from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge, RidgeCV
from sklearn.svm import SVC, SVR
from sklearn.ensemble import (
    RandomForestClassifier,
    RandomForestRegressor,
    VotingClassifier,
)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier


def Classifiers(x_train, y_train, n_splits):
    random_state = 42
    classifier = [
        DecisionTreeClassifier(random_state=random_state),
        SVC(random_state=random_state),
        RandomForestClassifier(random_state=random_state),
        LogisticRegression(random_state=random_state),
        KNeighborsClassifier(),
    ]
    dt_param_grid = {
        "min_samples_split": range(10, 500, 20),
        "max_depth": range(1, 20, 2),
    }
    svc_param_grid = {
        "kernel": ["rbf"],
        "gamma": [0.001, 0.01, 0.1, 1],
        "C": [1, 10, 50, 100, 200, 300, 1000],
        "probability": [True],
    }
    rf_param_grid = {
        "max_features": [1, 3, 10],
        "min_samples_split": [2, 3, 10],
        "min_samples_leaf": [1, 3, 10],
        "bootstrap": [False],
        "n_estimators": [100, 300],
        "criterion": ["gini"],
    }
    logreg_param_grid = {
        "C": np.logspace(-3, 3, 7),
        "penalty": ["l1", "l2"],
        "max_iter": [100, 500, 1000],
    }
    knn_param_grid = {
        "n_neighbors": np.linspace(1, 19, 10, dtype=int).tolist(),
        "weights": ["uniform", "distance"],
        "metric": ["euclidean", "manhattan"],
    }
    classifier_param = [
        dt_param_grid,
        svc_param_grid,
        rf_param_grid,
        logreg_param_grid,
        knn_param_grid,
    ]
    cv_result = []
    best_estimators = []
    for i in range(len(classifier)):
        clf = GridSearchCV(
            classifier[i],
            param_grid=classifier_param[i],
            cv=StratifiedKFold(n_splits=n_splits),
            scoring="accuracy",
            n_jobs=-1,
            verbose=1,
        )
        clf.fit(x_train, y_train)
        cv_result.append(clf.best_score_)
        best_estimators.append(clf.best_estimator_)
        print(cv_result[i])
    cv_results = pd.DataFrame(
        {
            "Cross Validation Means": cv_result,
            "ML Models": [
                "DecisionTreeClassifier",
                "SVM",
                "RandomForestClassifier",
                "LogisticRegression",
                "KNeighborsClassifier",
            ],
        }
    )
    prob_his(cv_results, "Cross Validation Means", "ML Models")
    return best_estimators, cv_results


# The n_splits parameter sets how many folds the data is split into; it is passed to the cross-validation scheme used here (StratifiedKFold).
# For example, with n_splits=5 the dataset is divided into five equal parts: one part is used as the test set and the remaining four as the training set. This is repeated five times so that every part serves once as the test set, and the scores are averaged into a single cross-validation score.
# This way the model's performance is evaluated on different parts of the data, which helps guard against overfitting to a single split.
# `GridSearchCV` is a cross-validated method for hyperparameter optimization. You give it a machine learning model and the candidate values of its hyperparameters.
# `GridSearchCV` also takes a scoring metric and measures the model's performance with that metric for every hyperparameter combination.
# `GridSearchCV` tries every possible hyperparameter combination and selects the best one, which is more efficient than tuning hyperparameters by hand.
# `GridSearchCV` defines a "parameter grid" for the hyperparameters and picks the best combination by trying every combination specified in that grid; the grid can contain all combinations of the different hyperparameters and their candidate values.
# `GridSearchCV` also uses a cross-validation scheme to score each combination, so performance is measured on different folds rather than on a single dataset, which improves the model's generalizability.
best_estimators, cv_results = Classifiers(x_train, y_train, 5)
best_estimators


def Classifiers_estimators(
    best_estimators,
    x_train,
    y_train,
    x_test,
    y_test,
    name_1_estimator,
    name_2_estimator,
    name_3_estimator,
    one_num,
    two_num,
    three_num,
):
    # Build a soft-voting ensemble from three of the tuned estimators.
    votingC = VotingClassifier(
        estimators=[
            (name_1_estimator, best_estimators[one_num]),
            (name_2_estimator, best_estimators[two_num]),
            (name_3_estimator, best_estimators[three_num]),
        ],
        voting="soft",
        n_jobs=-1,
    )
    votingC = votingC.fit(x_train, y_train)
    y_predict = votingC.predict(x_test)
    print("accuracy_score", accuracy_score(y_test, y_predict))
    cm = confusion_matrix(y_test, y_predict)
    print("Confusion Matrix:\n", cm)
    return votingC, y_predict


votingC, y_predict = Classifiers_estimators(
    best_estimators, x_train, y_train, x_test, y_test, "SVC", "rfc", "knn", 1, 2, 4
)
# This is a confusion matrix, used to evaluate the performance of classification models. It shows the counts of correct and incorrect classifications.
# This particular confusion matrix shows the performance of a classifier with two classes (class 0 and class 1). The matrix compares the predictions with the true classes.
# [[107   1]
#  [  3  60]]
# The top-left cell shows that 107 samples whose true class is 0 were predicted correctly. The top-right cell shows 1 sample whose true class is 0 but which was wrongly predicted as class 1.
# The bottom-left cell shows 3 samples whose true class is 1 but which were wrongly predicted as class 0. The bottom-right cell shows that 60 samples whose true class is 1 were predicted correctly.
# From this matrix you can compute the model's accuracy, precision, recall and F1 score.
# 5) Tree based feature selection and random forest classification
# http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html In the random forest classifier there is a feature_importances_ attribute that gives the feature importances (the higher, the more important the feature). !!! To use feature importances, the training data should not contain correlated features. Random forest chooses randomly at each iteration, therefore the order of the feature importance list can change.
clf_rf_5 = RandomForestClassifier()
clr_rf_5 = clf_rf_5.fit(x_train, y_train)
importances = clr_rf_5.feature_importances_
std = np.std([tree.feature_importances_ for tree in clf_rf_5.estimators_], axis=0)
indices = np.argsort(importances)[::-1]

# Print the feature ranking
print("Feature ranking:")

for f in range(x_train.shape[1]):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))

# Plot the feature importances of the forest
plt.figure(1, figsize=(14, 13))
plt.title("Feature importances")
plt.bar(
    range(x_train.shape[1]),
    importances[indices],
    color="g",
    yerr=std[indices],
    align="center",
)
plt.xticks(range(x_train.shape[1]), x_train.columns[indices], rotation=90)
plt.xlim([-1, x_train.shape[1]])
plt.show()
# As you can see in the plot above, the importance of the features drops off after the best 5 features, so we can focus on those 5. As I stressed before, I care about understanding the features and finding the best ones.
# Feature Extraction with PCA
# http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html We will use principal component analysis (PCA) for feature extraction. Before PCA, we need to normalize the data so that PCA performs better.
# split data train 70 % and test 30 %
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.3, random_state=42
)
# normalization
x_train_N = (x_train - x_train.mean()) / (x_train.max() - x_train.min())
x_test_N = (x_test - x_test.mean()) / (x_test.max() - x_test.min())

from sklearn.decomposition import PCA

pca = PCA()
pca.fit(x_train_N)
plt.figure(1, figsize=(14, 13))
plt.clf()
plt.axes([0.2, 0.2, 0.7, 0.7])
plt.plot(pca.explained_variance_ratio_, linewidth=2)
plt.axis("tight")
plt.xlabel("n_components")
plt.ylabel("explained_variance_ratio_")
# ## Conclusion
# In short, I tried to show the importance of feature selection and data visualization. The raw data contains 33 features, but after feature selection we reduce that number from 33 to 5 with about 95% accuracy. We only tried the basics in this kernel; I am sure that with these data visualization and feature selection methods you can easily exceed 95% accuracy, perhaps by using other classification methods.
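# The curve above only shows how the explained variance decays; a minimal sketch of turning it into a concrete choice of dimensionality (assuming the x_train_N / x_test_N frames built just above) is to let PCA keep just enough components to reach a target variance:
from sklearn.decomposition import PCA

# Keep the smallest number of components whose cumulative explained variance reaches 95%.
pca_95 = PCA(n_components=0.95, svd_solver="full")
x_train_pca = pca_95.fit_transform(x_train_N)
x_test_pca = pca_95.transform(x_test_N)  # transform the test split with the train-fitted PCA
print("components kept:", pca_95.n_components_)
print("cumulative explained variance:", pca_95.explained_variance_ratio_.sum())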
column = dataa1.columns dataa1 pvalues = p_values(dataa1, dataa_s, 569, 30, range(0, 30)) a = [1, 4, 5, 11, 12, 18, 24, 25] for i in a: print("'", column[i], "'", ",", end=" ") drop_list4 = [ "texture_mean", "smoothness_mean", "compactness_mean", "texture_se", "perimeter_se", "symmetry_se", "smoothness_worst", "compactness_worst", ] x_4 = x[drop_list4] x_4.head() x_4["diagnosis"] = y pio.templates.default = "plotly_dark" def create_hist(xval, color): fig = px.histogram( x_4, x=xval, color=color, title=xval, color_discrete_sequence=["yellowgreen", "gold"], width=600, height=300, ) fig.show() create_hist("texture_mean", "diagnosis") create_hist("smoothness_mean", "diagnosis") create_hist("compactness_mean", "diagnosis") create_hist("texture_se", "diagnosis") create_hist("perimeter_se", "diagnosis") create_hist("symmetry_se", "diagnosis") create_hist("smoothness_worst", "diagnosis") create_hist("compactness_worst", "diagnosis") # correlation map f, ax = plt.subplots(figsize=(14, 14)) sns.heatmap(x_4.corr(), annot=True, linewidths=0.5, fmt=".1f", ax=ax) pio.templates.default = "plotly_dark" def create_scatter(xval, yval): fig = px.scatter( x_4, x=xval, y=yval, color="diagnosis", title=xval + " " + "vs" + " " + yval, color_discrete_sequence=["yellowgreen", "gold"], width=600, height=300, ) fig.show() create_scatter("smoothness_mean", "compactness_mean") create_scatter("smoothness_mean", "compactness_worst") create_scatter("compactness_mean", "compactness_worst") create_scatter("smoothness_worst", "smoothness_mean") create_scatter("compactness_mean", "smoothness_mean") from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder x4 = x_4.drop("diagnosis", axis=1) y4 = x_4["diagnosis"] le = LabelEncoder() y4 = le.fit_transform(y) x_train, x_test, y_train, y_test = train_test_split( x4, y4, test_size=0.2, random_state=0, stratify=y ) x_train.shape, x_test.shape from sklearn.preprocessing import StandardScaler scaler = StandardScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.transform(x_test) import tensorflow as tf from tensorflow.keras import Sequential from tensorflow.keras.layers import ( Conv1D, MaxPool1D, Flatten, Dense, Dropout, BatchNormalization, ) from tensorflow.keras.optimizers import Adam x_train = x_train.reshape(455, 8, 1) x_test = x_test.reshape(114, 8, 1) epochs = 50 model = Sequential() model.add(Conv1D(filters=32, kernel_size=2, activation="relu", input_shape=(8, 1))) model.add(BatchNormalization()) model.add(Dropout(0.2)) model.add(Conv1D(filters=64, kernel_size=2, activation="relu")) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Flatten()) model.add(Dense(64, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(1, activation="sigmoid")) model.summary() model.compile( optimizer=Adam(lr=0.00005), loss="binary_crossentropy", metrics=["accuracy"] ) history = model.fit( x_train, y_train, epochs=60, validation_data=(x_test, y_test), verbose=1 ) epoch_range = range(1, 61) plt.plot(epoch_range, history.history["accuracy"]) plt.plot(epoch_range, history.history["val_accuracy"]) plt.title("Model Accuracy") plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.legend(["Train", "Val"], loc="upper left") plt.show() epoch_range = range(1, 61) plt.plot(epoch_range, history.history["loss"]) plt.plot(epoch_range, history.history["val_loss"]) plt.title("Model Loss") plt.xlabel("Epochs") plt.ylabel("Loss") plt.legend(["Train", "Val"], loc="upper left") plt.show() from mlxtend.plotting import plot_confusion_matrix from 
sklearn.metrics import accuracy_score, confusion_matrix y_pred = model.predict(x_test) rounded_arr = np.round(y_pred, 0) accuracy_score(y_test, rounded_arr) mat = confusion_matrix(y_test, rounded_arr) classes_name = ["Malignant", "Benign"] plot_confusion_matrix(mat, figsize=(10, 8), class_names=classes_name, show_normed=True) plt.xticks(rotation=0)
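# The notes above point out that accuracy, precision, recall and the F1 score can all be read off a confusion matrix like this one; as a minimal sketch (assuming y_test and the thresholded predictions rounded_arr from the evaluation cell above), scikit-learn can report them directly:
from sklearn.metrics import classification_report, precision_score, recall_score, f1_score

y_hat = rounded_arr.ravel()  # flatten the (n, 1) Keras output to match y_test's shape
print(classification_report(y_test, y_hat, digits=3))  # per-class precision/recall/F1 plus accuracy
print("precision:", precision_score(y_test, y_hat))
print("recall   :", recall_score(y_test, y_hat))
print("f1 score :", f1_score(y_test, y_hat))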
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/082/129082617.ipynb
breast-cancer-wisconsin-data
null
[{"Id": 129082617, "ScriptId": 38361348, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9683291, "CreationDate": "05/10/2023 21:27:20", "VersionNumber": 4.0, "Title": "Breast Cancer Wisconsin Feature Selection and CNN", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 616.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 616.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184821412, "KernelVersionId": 129082617, "SourceDatasetVersionId": 408}]
[{"Id": 408, "DatasetId": 180, "DatasourceVersionId": 408, "CreatorUserId": 711301, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "09/25/2016 10:49:04", "VersionNumber": 2.0, "Title": "Breast Cancer Wisconsin (Diagnostic) Data Set", "Slug": "breast-cancer-wisconsin-data", "Subtitle": "Predict whether the cancer is benign or malignant", "Description": "Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. \nn the 3-dimensional space is that described in: [K. P. Bennett and O. L. Mangasarian: \"Robust Linear Programming Discrimination of Two Linearly Inseparable Sets\", Optimization Methods and Software 1, 1992, 23-34]. \n\nThis database is also available through the UW CS ftp server: \nftp ftp.cs.wisc.edu \ncd math-prog/cpo-dataset/machine-learn/WDBC/\n\nAlso can be found on UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29\n\nAttribute Information:\n\n1) ID number \n2) Diagnosis (M = malignant, B = benign) \n3-32) \n\nTen real-valued features are computed for each cell nucleus: \n\na) radius (mean of distances from center to points on the perimeter) \nb) texture (standard deviation of gray-scale values) \nc) perimeter \nd) area \ne) smoothness (local variation in radius lengths) \nf) compactness (perimeter^2 / area - 1.0) \ng) concavity (severity of concave portions of the contour) \nh) concave points (number of concave portions of the contour) \ni) symmetry \nj) fractal dimension (\"coastline approximation\" - 1)\n\nThe mean, standard error and \"worst\" or largest (mean of the three\nlargest values) of these features were computed for each image,\nresulting in 30 features. For instance, field 3 is Mean Radius, field\n13 is Radius SE, field 23 is Worst Radius.\n\nAll feature values are recoded with four significant digits.\n\nMissing attribute values: none\n\nClass distribution: 357 benign, 212 malignant", "VersionNotes": "This updated dataset has column names added", "TotalCompressedBytes": 125204.0, "TotalUncompressedBytes": 125204.0}]
[{"Id": 180, "CreatorUserId": 711301, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 408.0, "CurrentDatasourceVersionId": 408.0, "ForumId": 1547, "Type": 2, "CreationDate": "09/19/2016 20:27:05", "LastActivityDate": "02/06/2018", "TotalViews": 1744898, "TotalDownloads": 301790, "TotalVotes": 3191, "TotalKernels": 2628}]
null
false
0
11,607
0
12,133
11,607
129082244
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap import seaborn as sns pd.set_option("display.max_columns", 150) import warnings from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression from sklearn.linear_model import Ridge from sklearn.linear_model import Lasso from sklearn.linear_model import ElasticNet from sklearn.ensemble import RandomForestRegressor def set1(fig_size, face_color): sns.set_theme( font_scale=1, rc={ "figure.figsize": fig_size, "axes.facecolor": face_color, "axes.edgecolor": "#AFAFAF", "axes.grid": True, "grid.color": "#E9E9E9", "legend.fontsize": 12, "legend.title_fontsize": 15, }, ) def chart1(ytep, yte, ytrp, ytr, color_code1, color_code2): fig, ax = plt.subplots(figsize=(20, 12)) ax.set_facecolor("#FAFEFC") title_size = 20 ax.scatter(ytep, ytep - yte, color=colores[color_code1], alpha=0.2) ax.scatter(ytrp, ytrp - ytr, color=colores[color_code2], alpha=0.2) ax.tick_params(axis="x", rotation=0) ax.legend(["test", "train"], fontsize=15) ax.xaxis.set_major_formatter(mpl.ticker.StrMethodFormatter("{x:,.0f}")) ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter("{x:,.0f}")) ax.set_xlim(1000, 9000) ax.set_ylim(-2000, 2000) ax.set_title("Yield", fontsize=title_size) plt.show() def modelo(X_train, X_test, y_train, y_test, Model, lasso_y="N", ratio=1.0): if lasso_y == "N": model = Model(alpha=1.0) else: model = Model(alpha=1.0, l1_ratio=ratio) model.fit(X_train, y_train) y_train_pred = model.predict(X_train) y_test_pred = model.predict(X_test) print(y_train_pred.shape) mae_train = np.mean(abs(y_train_pred - y_train)) print(mae_train) mae_test = np.mean(abs(y_test_pred - y_test)) print(mae_test) return y_train_pred, y_test_pred def lineal(X_train, X_test, y_train, y_test): slr = LinearRegression() slr.fit(X_train, y_train) y_train_pred = slr.predict(X_train) y_test_pred = slr.predict(X_test) print(y_train_pred.shape) print(y_train.shape) mae_train = np.mean(abs(y_train_pred - y_train)) print(mae_train) mae_test = np.mean(abs(y_test_pred - y_test)) print(mae_test) return y_train_pred, y_test_pred def forest(X_train, X_test, y_train, y_test): forest = RandomForestRegressor( n_estimators=1000, criterion="squared_error", random_state=42, n_jobs=-1 ) forest.fit(X_train, y_train) y_train_pred = forest.predict(X_train) y_test_pred = forest.predict(X_test) print(y_train_pred.shape) print(y_train.shape) mae_train = np.mean(abs(y_train_pred - y_train)) print(mae_train) mae_test = np.mean(abs(y_test_pred - y_test)) print(mae_test) return y_train_pred, y_test_pred colores = [ "brown", "darkorange", "orange", "tomato", "crimson", "deeppink", "indigo", "peru", "lightcoral", "rosybrown", "navy", "steelblue", "darkviolet", "slategray", "slateblue", "turquoise", "darkslateblue", "mediumslateblue", "mediumpurple", "dodgerblue", "royalblue", "cornflowerblue", "darkgreen", "palegreen", "forestgreen", "gold", "darkkhaki", "teal", "salmon", "orchid", "black", "white", ] df_train = pd.read_csv(dirname + "/" + filenames[1]) print(df_train.info()) df_test = pd.read_csv(dirname + "/" + filenames[2]) print(df_test.info()) columnas_train = df_train.columns columnas_test = df_test.columns print(df_train.describe()) 
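# The model helpers above score train and test with a hand-rolled MAE, np.mean(abs(pred - true)), which is also the competition metric. A minimal sketch of the same number via scikit-learn (assuming prediction and target arrays shaped as in those helpers), with an explicit flatten so an (n, 1) target and an (n,) prediction never broadcast into an (n, n) matrix:
from sklearn.metrics import mean_absolute_error


def mae(y_true, y_pred):
    # Flatten both arrays first; mean_absolute_error then matches np.mean(np.abs(y_pred - y_true)).
    return mean_absolute_error(np.ravel(y_true), np.ravel(y_pred))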
print("=" * 50) print(df_test.describe()) X = df_train.drop(["id", "yield"], axis=1).values y = df_train.loc[:, ["yield"]].values X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) print(y_train.shape) print(y_test.shape) y_train_pred, y_test_pred = lineal(X_train, X_test, y_train, y_test) chart1(y_test_pred, y_test, y_train_pred, y_train, 0, 11) y_train_pred, y_test_pred = modelo(X_train, X_test, y_train, y_test, Ridge) chart1(y_test_pred, y_test, y_train_pred, y_train, 12, 5) print(len(y_train_pred)) print(len(y_train)) print(len(y_test_pred)) print(len(y_test)) y_train_pred, y_test_pred = modelo(X_train, X_test, y_train, y_test, ElasticNet, "Y") y_train_pred = y_train_pred.reshape((10702, 1)) y_test_pred = y_test_pred.reshape((4587, 1)) chart1(y_test_pred, y_test, y_train_pred, y_train, 4, 16) y_train_pred, y_test_pred = modelo( X_train, X_test, y_train, y_test, ElasticNet, "Y", 0.3 ) y_train_pred = y_train_pred.reshape((10702, 1)) y_test_pred = y_test_pred.reshape((4587, 1)) chart1(y_test_pred, y_test, y_train_pred, y_train, 2, 20) y_train = y_train.reshape((10702,)) y_test = y_test.reshape((4587,)) y_train_pred, y_test_pred = forest(X_train, X_test, y_train, y_test) chart1(y_test_pred, y_test, y_train_pred, y_train, 4, 11) X_pred = df_test.drop(["id"], axis=1).values slr.fit(X, y) y_pred = model.predict(X_pred) print(X_pred.shape) print(y_pred.shape) X_pred = df_test.drop(["id"], axis=1).values y_pred = slr.predict(X_pred) print(X_pred.shape) print(y_pred.shape) y_pred = y_pred.reshape((10194,)) print(y_pred.shape) df_sub = pd.DataFrame({"yield": y_pred}) df_sub = df_test.join(df_sub) sub_230510 = df_sub.loc[:, ["id", "yield"]] sub_230510.to_csv("00_sub_230510.csv", index=False) X_pred = df_test.drop(["id"], axis=1).values Model = Ridge model = Model(alpha=1.0) model.fit(X, y) y_pred = model.predict(X_pred) print(X_pred.shape) print(y_pred.shape) X_pred = df_test.drop(["id"], axis=1).values y_pred = model.predict(X_pred) print(X_pred.shape) print(y_pred.shape) y_pred = y_pred.reshape((10194,)) print(y_pred.shape) df_sub = pd.DataFrame({"yield": y_pred}) df_sub = df_test.join(df_sub) sub_230510 = df_sub.loc[:, ["id", "yield"]] sub_230510.to_csv("01_sub_230510.csv", index=False) y = y.reshape((15289,)) forest = RandomForestRegressor( n_estimators=1000, criterion="squared_error", random_state=42, n_jobs=-1 ) forest.fit(X, y) X_pred = df_test.drop(["id"], axis=1).values y_pred = forest.predict(X_pred) print(X_pred.shape) print(y_pred.shape) df_sub = pd.DataFrame({"yield": y_pred}) df_sub = df_test.join(df_sub) sub_230510 = df_sub.loc[:, ["id", "yield"]] sub_230510.to_csv("02_sub_230510.csv", index=False) # # Selection df_sel = df_train[df_train["honeybee"] < 6] # X = df_sel.loc[:, ['clonesize', 'honeybee', 'fruitset']].values X = df_sel.drop(["id", "yield"], axis=1).values y = df_sel.loc[:, ["yield"]].values X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) print(y_train.shape) print(y_test.shape) y_train_pred, y_test_pred = lineal(X_train, X_test, y_train, y_test) chart1(y_test_pred, y_test, y_train_pred, y_train, 0, 11) X_pred = df_test.drop(["id"], axis=1).values slr.fit(X, y) y_pred = model.predict(X_pred) print(X_pred.shape) print(y_pred.shape) X_pred = df_test.drop(["id"], axis=1).values y_pred = slr.predict(X_pred) print(X_pred.shape) print(y_pred.shape) y_pred = y_pred.reshape((10194,)) print(y_pred.shape) df_sub = pd.DataFrame({"yield": y_pred}) df_sub = df_test.join(df_sub) sub_230510 = 
df_sub.loc[:, ["id", "yield"]] sub_230510.to_csv("03_sub_230510.csv", index=False)
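# The submission cells above refer to slr and model, which only exist inside the lineal/modelo helpers, so they assume those names are still around in the session. A minimal self-contained sketch of one refit-and-write pass (assuming df_train and df_test as loaded above; the output file name is just illustrative):
from sklearn.linear_model import Ridge

X_full = df_train.drop(["id", "yield"], axis=1).values
y_full = df_train["yield"].values
X_sub = df_test.drop(["id"], axis=1).values

final_model = Ridge(alpha=1.0).fit(X_full, y_full)  # refit on every training row
submission = pd.DataFrame({"id": df_test["id"], "yield": final_model.predict(X_sub)})
submission.to_csv("ridge_submission.csv", index=False)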
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/082/129082244.ipynb
null
null
[{"Id": 129082244, "ScriptId": 38321751, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 294322, "CreationDate": "05/10/2023 21:21:36", "VersionNumber": 1.0, "Title": "The Accountant - Mudi Burusu", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 250.0, "LinesInsertedFromPrevious": 250.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
3,097
0
3,097
3,097
129082191
# ## Data in motion :Data Analysis Challenge Week 20 # #### Importing packages import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt financial = pd.read_csv( "https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/09_Time_Series/Apple_Stock/appl_1980_2014.csv" ) financial # ### 1. Check out the type of the columns financial.dtypes # ### 2. Transform the Date column as a datetime type financial["Date"] = pd.to_datetime(financial["Date"]) financial["Date"] # ### 3. Set the date as the index financial.set_index("Date") # ### 4. Is there any duplicate dates? financial.duplicated # ### 5. Sort the index so the first entry is the oldest date financial.sort_index(ascending=False) # ### 6. Get the last business day of each month last_days = financial.groupby( [financial["Date"].dt.year, financial["Date"].dt.month] ).last() last_bdays = last_days["Date"] - pd.offsets.BDay() print(last_bdays) # ### 7. What is the difference in days between the first day and the oldest print( f'There is a difference of {financial["Date"].iloc[0]-financial["Date"].iloc[-1]} ' ) # ### 8. How many months in the data do we have start_date = financial["Date"].min() end_date = financial["Date"].max() num_months = ( (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month) + 1 ) print(f"There are {num_months} months in the data") # ### 9. Plot the 'Adj Close' value plt.plot(financial["Date"], financial["Adj Close"]) plt.title("Adjusted Closing Price over Time") plt.xlabel("Date") plt.ylabel("Adj Close") plt.show()
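# Questions 6-8 above hinge on grouping the price history by calendar month. A minimal alternative sketch (assuming financial["Date"] has already been parsed to datetime in step 2) takes the last trading day actually present in each month, rather than offsetting the grouped dates by a business day:
last_obs = financial.groupby(financial["Date"].dt.to_period("M"))["Date"].max()
print(last_obs.head())

# Distinct months covered, as a cross-check of the year/month arithmetic above.
print("months in the data:", financial["Date"].dt.to_period("M").nunique())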
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/082/129082191.ipynb
null
null
[{"Id": 129082191, "ScriptId": 38372955, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10126182, "CreationDate": "05/10/2023 21:20:46", "VersionNumber": 1.0, "Title": "Data in motion : Data Analysis Challenge Week 20", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 63.0, "LinesInsertedFromPrevious": 63.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
false
0
508
2
508
508
129082860
# # Wild BlueBerry Yield Predictions # Submissions will be evaluated using Mean Absolute Error (MAE) import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) import warnings warnings.filterwarnings("ignore") import matplotlib.pyplot as plt import seaborn as sns # ## Reading the dataset df = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv") test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv") df.head(10) # ## Exploratory Data Analysis df.info() df.describe().T df.isna().sum() df.shape # Learn some more exploratory vizualization for numerical variables # ## Feature selection using Correlation matrix f, ax = plt.subplots(figsize=(25, 15)) sns.heatmap(df.corr(), annot=True, vmin=-1, vmax=1) # There are few columns which has perfect co-linearity, # Delete these columns corr_matrix = df.corr().abs() upper_tri = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool)) to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > 0.95)] df_new = df.drop(df[to_drop], axis=1) test_new = test.drop(test[to_drop], axis=1) df_new.head() # ## Train Test Split from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( df_new.drop(["yield"], axis=1), df_new["yield"] ) X_train.head() # ## 1. Linear Regression from sklearn.linear_model import LinearRegression linreg = LinearRegression() linreg.fit(X_train, y_train) y_pred = linreg.predict(X_test) from sklearn.metrics import mean_absolute_percentage_error import math from sklearn import metrics mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) results = pd.DataFrame( [["Linear Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results ## 2. RidgeCV Regression # from sklearn.linear_model import Ridge # ridreg = Ridge(random_state=3) # ridreg.fit(X_train,y_train) # y_pred = ridreg.predict(X_test) # mae = metrics.mean_absolute_error(y_test, y_pred) # mape = mean_absolute_percentage_error(y_test, y_pred) # mse = metrics.mean_squared_error(y_test, y_pred) # r2 = metrics.r2_score(y_test, y_pred) # rmse = math.sqrt(mse) # temp_results = pd.DataFrame([['Ridge Regression', mae, mape, mse, r2, rmse]], columns=['Model', 'MAE', 'MAPE', 'MSE', 'R2','RMSE']) # results = pd.concat([results,temp_results], ignore_index=True) # results # ## 2. RidgeCV regession from sklearn.linear_model import RidgeCV alphas = [0.1, 0.3, 0.5, 0.7, 0.9, 1.0] ridregcv = RidgeCV(alphas=alphas) ridregcv.fit(X_train, y_train) y_pred = ridregcv.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Ridge CV Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 3. 
Lasso Regression from sklearn.linear_model import LassoCV alphas = [0.1, 0.3, 0.5, 0.7, 0.9, 1.0] lassocv = LassoCV( alphas=alphas, cv=10, random_state=3, ) lassocv.fit(X_train, y_train) y_pred = lassocv.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Lasso Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 4.Elastic Net Regression from sklearn.linear_model import ElasticNetCV ela_reg = ElasticNetCV(cv=10, random_state=3) ela_reg.fit(X_train, y_train) y_pred = ela_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["ElasticNetCV Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 5. Support Vector Regression from sklearn.svm import SVR svr_reg = SVR() svr_reg.fit(X_train, y_train) y_pred = svr_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Support Vector Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 6. Decision Tree Regression from sklearn.tree import DecisionTreeRegressor dt_reg = DecisionTreeRegressor(random_state=3) dt_reg.fit(X_train, y_train) y_pred = dt_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Decision Tree Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 7. Random Forest Regression from sklearn.ensemble import RandomForestRegressor ranfor_reg = RandomForestRegressor(max_depth=2, random_state=3) ranfor_reg.fit(X_train, y_train) y_pred = ranfor_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Random Forest Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 8. 
Xgboost Regressor import xgboost as xgb xbg_reg = xgb.XGBRegressor() xbg_reg.fit(X_train, y_train) y_pred = xbg_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["XGB Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 9. LightGBM Regression from lightgbm import LGBMRegressor lgb_reg = LGBMRegressor() lgb_reg.fit(X_train, y_train) y_pred = lgb_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["LGBM Regressor", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 10. CatBoost Regression import catboost as cb cb_reg = cb.CatBoostRegressor() cb_reg.fit(X_train, y_train, verbose=False) y_pred = cb_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["CatBoost Regressor", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 11. Decision Tree Regressor with GridSearchCV from sklearn.tree import DecisionTreeRegressor from sklearn.model_selection import GridSearchCV dtree = DecisionTreeRegressor() param_grid = { "max_depth": [2, 4, 6, 8], "min_samples_split": [2, 4, 6, 8], "min_samples_leaf": [1, 2, 3, 4], "max_features": ["auto", "sqrt", "log2"], "random_state": [0, 42], } grid_search = GridSearchCV(dtree, param_grid, cv=5, scoring="neg_mean_squared_error") grid_search.fit(X_train, y_train) y_pred = grid_search.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Decision Tree Regressor CV", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # SO,, after applying 10 Regression base models on our dataset, we can see that **LGBM Regressor** has performed the best, followed closely by **CatBoost Regressor**, and Linear Regression, Ridge Regression, Lasso Regression & XGB Regression also have fairly low score. # So let's Tune Hyperparameters for all these regressions and find the best model # # Hyper Parameter Tuning # ## 1. 
CatBoost Regressor from catboost import CatBoostRegressor from sklearn.model_selection import GridSearchCV params = { "learning_rate": [0.01, 0.05, 0.1], "depth": [3, 5, 7], "l2_leaf_reg": [1, 3, 5], "iterations": [100, 200], } model = CatBoostRegressor() grid = GridSearchCV( estimator=model, param_grid=params, scoring="neg_mean_squared_error", cv=5 ) # Set up the cross-validation scheme cv = 5 # Train and evaluate the model with each combination of hyperparameters grid.fit(X_train, y_train, verbose=True) # Get the best hyperparameters from grid search and use them to train a final model best_params = grid.best_params_ best_catboost = CatBoostRegressor(**best_params) best_catboost.fit(X_train, y_train) y_pred = best_catboost.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Best CatBoost Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 2. LGBM Regressor model = LGBMRegressor() params = { "learning_rate": [0.01, 0.05, 0.1], "num_leaves": [31, 61, 101], "max_depth": [3, 5, 7], "min_child_samples": [20, 40, 60], "n_estimators": [50, 100], "boosting_type": ["gbdt", "dart", "goss"], } grid = GridSearchCV( estimator=model, param_grid=params, scoring="neg_mean_squared_error", cv=2 ) grid.fit(X_train, y_train, verbose=True) best_params = grid.best_params_ best_lbgmreg = LGBMRegressor(**best_params) best_lbgmreg.fit(X_train, y_train) y_pred = best_lbgmreg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Best LGBM Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 3. XGB Regression params = { "learning_rate": [0.01, 0.05, 0.1], "max_depth": [3, 5, 7], "n_estimators": [5], "gamma": [0, 0.1, 0.5, 1], "subsample": [0.5, 0.7, 1.0], "reg_alpha": [0, 0.1, 1], "reg_lambda": [0, 0.1, 1], } model = xgb.XGBRegressor() grid = GridSearchCV(estimator=model, param_grid=params, cv=5, verbose=True) grid.fit(X_train, y_train) best_params = grid.best_params_ best_xgbreg = xgb.XGBRegressor(**best_params) y_pred = best_xgbreg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Best XGB Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # After comparing with the hyper-parameter tuned models, we can find ** ** model to be the best fit # # Final Model # Catboost Regression predictions = best_catboost.predict(test_new) output = pd.DataFrame({"id": test.id, "yield": predictions}) output.to_csv("submission.csv", index=False) print("Your submission was successfully saved!")
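# The cells above recompute the same five metrics and concat a new row for every
# model; a small helper like the sketch below keeps that bookkeeping in one place.
# This is not from the original notebook; `score_model` and its arguments are
# illustrative names, and it assumes the estimators and splits defined above.
import math
import pandas as pd
from sklearn import metrics
from sklearn.metrics import mean_absolute_percentage_error


def score_model(name, y_true, y_pred, results=None):
    # Compute the notebook's metric set once and append a row to the results table.
    mae = metrics.mean_absolute_error(y_true, y_pred)
    mape = mean_absolute_percentage_error(y_true, y_pred)
    mse = metrics.mean_squared_error(y_true, y_pred)
    row = pd.DataFrame(
        [[name, mae, mape, mse, metrics.r2_score(y_true, y_pred), math.sqrt(mse)]],
        columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"],
    )
    return row if results is None else pd.concat([results, row], ignore_index=True)


# Example usage with estimators defined above:
# results = score_model("Linear Regression", y_test, linreg.predict(X_test))
# results = score_model("Ridge CV Regression", y_test, ridregcv.predict(X_test), results)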
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/082/129082860.ipynb
null
null
[{"Id": 129082860, "ScriptId": 38352849, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14462934, "CreationDate": "05/10/2023 21:31:22", "VersionNumber": 1.0, "Title": "Blue Berry Predictions 2", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 405.0, "LinesInsertedFromPrevious": 405.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# # Wild BlueBerry Yield Predictions # Submissions will be evaluated using Mean Absolute Error (MAE) import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) import warnings warnings.filterwarnings("ignore") import matplotlib.pyplot as plt import seaborn as sns # ## Reading the dataset df = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv") test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv") df.head(10) # ## Exploratory Data Analysis df.info() df.describe().T df.isna().sum() df.shape # Learn some more exploratory vizualization for numerical variables # ## Feature selection using Correlation matrix f, ax = plt.subplots(figsize=(25, 15)) sns.heatmap(df.corr(), annot=True, vmin=-1, vmax=1) # There are few columns which has perfect co-linearity, # Delete these columns corr_matrix = df.corr().abs() upper_tri = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool)) to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > 0.95)] df_new = df.drop(df[to_drop], axis=1) test_new = test.drop(test[to_drop], axis=1) df_new.head() # ## Train Test Split from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( df_new.drop(["yield"], axis=1), df_new["yield"] ) X_train.head() # ## 1. Linear Regression from sklearn.linear_model import LinearRegression linreg = LinearRegression() linreg.fit(X_train, y_train) y_pred = linreg.predict(X_test) from sklearn.metrics import mean_absolute_percentage_error import math from sklearn import metrics mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) results = pd.DataFrame( [["Linear Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results ## 2. RidgeCV Regression # from sklearn.linear_model import Ridge # ridreg = Ridge(random_state=3) # ridreg.fit(X_train,y_train) # y_pred = ridreg.predict(X_test) # mae = metrics.mean_absolute_error(y_test, y_pred) # mape = mean_absolute_percentage_error(y_test, y_pred) # mse = metrics.mean_squared_error(y_test, y_pred) # r2 = metrics.r2_score(y_test, y_pred) # rmse = math.sqrt(mse) # temp_results = pd.DataFrame([['Ridge Regression', mae, mape, mse, r2, rmse]], columns=['Model', 'MAE', 'MAPE', 'MSE', 'R2','RMSE']) # results = pd.concat([results,temp_results], ignore_index=True) # results # ## 2. RidgeCV regession from sklearn.linear_model import RidgeCV alphas = [0.1, 0.3, 0.5, 0.7, 0.9, 1.0] ridregcv = RidgeCV(alphas=alphas) ridregcv.fit(X_train, y_train) y_pred = ridregcv.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Ridge CV Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 3. 
Lasso Regression from sklearn.linear_model import LassoCV alphas = [0.1, 0.3, 0.5, 0.7, 0.9, 1.0] lassocv = LassoCV( alphas=alphas, cv=10, random_state=3, ) lassocv.fit(X_train, y_train) y_pred = lassocv.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Lasso Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 4.Elastic Net Regression from sklearn.linear_model import ElasticNetCV ela_reg = ElasticNetCV(cv=10, random_state=3) ela_reg.fit(X_train, y_train) y_pred = ela_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["ElasticNetCV Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 5. Support Vector Regression from sklearn.svm import SVR svr_reg = SVR() svr_reg.fit(X_train, y_train) y_pred = svr_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Support Vector Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 6. Decision Tree Regression from sklearn.tree import DecisionTreeRegressor dt_reg = DecisionTreeRegressor(random_state=3) dt_reg.fit(X_train, y_train) y_pred = dt_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Decision Tree Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 7. Random Forest Regression from sklearn.ensemble import RandomForestRegressor ranfor_reg = RandomForestRegressor(max_depth=2, random_state=3) ranfor_reg.fit(X_train, y_train) y_pred = ranfor_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Random Forest Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 8. 
Xgboost Regressor import xgboost as xgb xbg_reg = xgb.XGBRegressor() xbg_reg.fit(X_train, y_train) y_pred = xbg_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["XGB Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 9. LightGBM Regression from lightgbm import LGBMRegressor lgb_reg = LGBMRegressor() lgb_reg.fit(X_train, y_train) y_pred = lgb_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["LGBM Regressor", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 10. CatBoost Regression import catboost as cb cb_reg = cb.CatBoostRegressor() cb_reg.fit(X_train, y_train, verbose=False) y_pred = cb_reg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["CatBoost Regressor", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 11. Decision Tree Regressor with GridSearchCV from sklearn.tree import DecisionTreeRegressor from sklearn.model_selection import GridSearchCV dtree = DecisionTreeRegressor() param_grid = { "max_depth": [2, 4, 6, 8], "min_samples_split": [2, 4, 6, 8], "min_samples_leaf": [1, 2, 3, 4], "max_features": ["auto", "sqrt", "log2"], "random_state": [0, 42], } grid_search = GridSearchCV(dtree, param_grid, cv=5, scoring="neg_mean_squared_error") grid_search.fit(X_train, y_train) y_pred = grid_search.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Decision Tree Regressor CV", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # SO,, after applying 10 Regression base models on our dataset, we can see that **LGBM Regressor** has performed the best, followed closely by **CatBoost Regressor**, and Linear Regression, Ridge Regression, Lasso Regression & XGB Regression also have fairly low score. # So let's Tune Hyperparameters for all these regressions and find the best model # # Hyper Parameter Tuning # ## 1. 
CatBoost Regressor from catboost import CatBoostRegressor from sklearn.model_selection import GridSearchCV params = { "learning_rate": [0.01, 0.05, 0.1], "depth": [3, 5, 7], "l2_leaf_reg": [1, 3, 5], "iterations": [100, 200], } model = CatBoostRegressor() grid = GridSearchCV( estimator=model, param_grid=params, scoring="neg_mean_squared_error", cv=5 ) # Set up the cross-validation scheme cv = 5 # Train and evaluate the model with each combination of hyperparameters grid.fit(X_train, y_train, verbose=True) # Get the best hyperparameters from grid search and use them to train a final model best_params = grid.best_params_ best_catboost = CatBoostRegressor(**best_params) best_catboost.fit(X_train, y_train) y_pred = best_catboost.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Best CatBoost Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 2. LGBM Regressor model = LGBMRegressor() params = { "learning_rate": [0.01, 0.05, 0.1], "num_leaves": [31, 61, 101], "max_depth": [3, 5, 7], "min_child_samples": [20, 40, 60], "n_estimators": [50, 100], "boosting_type": ["gbdt", "dart", "goss"], } grid = GridSearchCV( estimator=model, param_grid=params, scoring="neg_mean_squared_error", cv=2 ) grid.fit(X_train, y_train, verbose=True) best_params = grid.best_params_ best_lbgmreg = LGBMRegressor(**best_params) best_lbgmreg.fit(X_train, y_train) y_pred = best_lbgmreg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Best LGBM Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # ## 3. XGB Regression params = { "learning_rate": [0.01, 0.05, 0.1], "max_depth": [3, 5, 7], "n_estimators": [5], "gamma": [0, 0.1, 0.5, 1], "subsample": [0.5, 0.7, 1.0], "reg_alpha": [0, 0.1, 1], "reg_lambda": [0, 0.1, 1], } model = xgb.XGBRegressor() grid = GridSearchCV(estimator=model, param_grid=params, cv=5, verbose=True) grid.fit(X_train, y_train) best_params = grid.best_params_ best_xgbreg = xgb.XGBRegressor(**best_params) y_pred = best_xgbreg.predict(X_test) mae = metrics.mean_absolute_error(y_test, y_pred) mape = mean_absolute_percentage_error(y_test, y_pred) mse = metrics.mean_squared_error(y_test, y_pred) r2 = metrics.r2_score(y_test, y_pred) rmse = math.sqrt(mse) temp_results = pd.DataFrame( [["Best XGB Regression", mae, mape, mse, r2, rmse]], columns=["Model", "MAE", "MAPE", "MSE", "R2", "RMSE"], ) results = pd.concat([results, temp_results], ignore_index=True) results # After comparing with the hyper-parameter tuned models, we can find ** ** model to be the best fit # # Final Model # Catboost Regression predictions = best_catboost.predict(test_new) output = pd.DataFrame({"id": test.id, "yield": predictions}) output.to_csv("submission.csv", index=False) print("Your submission was successfully saved!")
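# In the XGB tuning cell above, `best_xgbreg` is instantiated with the best
# parameters but never fitted before predicting. A minimal sketch of the same
# step that reuses the refit search object instead (assumes `X_train`, `y_train`,
# `X_test` from the notebook; the small grid here mirrors the one above):
import xgboost as xgb
from sklearn.model_selection import GridSearchCV

params = {"learning_rate": [0.01, 0.05, 0.1], "max_depth": [3, 5, 7], "n_estimators": [5]}
grid = GridSearchCV(estimator=xgb.XGBRegressor(), param_grid=params, cv=5)
grid.fit(X_train, y_train)
# With refit=True (the default), the best configuration is refit on the full
# training split, so best_estimator_ can predict without an extra fit() call.
y_pred = grid.best_estimator_.predict(X_test)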
false
0
4,939
0
4,939
4,939
129161223
# importing of modules for CIFAR-10 CNN from tensorflow.keras.datasets import cifar10, cifar100 from tensorflow.keras import utils from tensorflow.keras.models import Sequential from tensorflow.keras.layers import ( Dense, Dropout, Activation, Flatten, SpatialDropout2D, ) from tensorflow.keras.layers import Conv2D, MaxPooling2D from tensorflow.keras.layers import BatchNormalization from tensorflow.keras.optimizers import SGD, Adam, RMSprop import tensorflow as tf from sklearn.model_selection import GridSearchCV from scikeras.wrappers import KerasClassifier from sklearn.model_selection import GridSearchCV import numpy as np import pandas as pd import keras_tuner from tensorflow import keras from tensorflow.keras import layers import matplotlib import matplotlib.pyplot as plt from IPython import display import pandas as pd from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras import backend as K from sklearn.metrics import classification_report import cv2 print("Libraries imported.") # training constants BATCH_SIZE = 64 N_EPOCH = 100 N_CLASSES = 100 VERBOSE = 1 VALIDATION_SPLIT = 0.2 IMG_CHANNELS = 3 IMG_ROWS = 32 IMG_COLS = 32 (input_X_train, output_y_train), (input_X_test, output_y_test) = cifar100.load_data() print("input_X_train shape:", input_X_train.shape) print(input_X_train.shape[0], "train samples") print(input_X_test.shape[0], "test samples") # convert to categorical output_Y_train = utils.to_categorical(output_y_train, N_CLASSES) output_Y_test = utils.to_categorical(output_y_test, N_CLASSES) # float and normalization input_X_train = input_X_train.astype("float32") input_X_test = input_X_test.astype("float32") input_X_train /= 255 input_X_test /= 255 plt.figure(figsize=(10, 10)) for i in range(4 * 4): plt.subplot(4, 4, i + 1) plt.imshow(input_X_train[i]) plt.axis("off") plt.title(output_y_train[i]) plt.show() # # Hyperparameter Tuning with Keras tuner # ## Using Bayesian optimizer # Take VGG as the baseline model def build_model(hp): inputs = keras.Input(shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)) x = inputs # first CONV => RELU => POOL layer set x = layers.Conv2D( hp.Int("conv_1", 32, 64, step=32), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.Conv2D( hp.Int("conv_1", 32, 64, step=32), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(hp.Float("dropout_1", 0, 0.4, step=0.2))(x) # second CONV => RELU => POOL layer set x = layers.Conv2D( hp.Int("conv_2", 64, 128, step=32), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.Conv2D( hp.Int("conv_2", 64, 128, step=32), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(hp.Float("dropout_2", 0, 0.4, step=0.2))(x) # third CONV => RELU => POOL layer set x = layers.Conv2D( hp.Int("conv_3", 128, 256, step=64), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="valid", activation="relu", )(x) x = layers.Conv2D( hp.Int("conv_3", 128, 256, step=64), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="valid", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(hp.Float("dropout_3", 0, 0.4, step=0.2))(x) # Fully connected layer x 
= layers.Flatten()(x) x = layers.Dense( hp.Int("dense_units", min_value=128, max_value=384, step=128), activation="relu", kernel_initializer="he_uniform", )(x) # Output layer x = layers.Dropout(0.2)(x) outputs = layers.Dense(units=N_CLASSES, activation="softmax")(x) model = keras.Model(inputs=inputs, outputs=outputs) # initialize the learning rate choices and optimizer lr = hp.Choice("learning_rate", values=[1e-1, 1e-3]) opt = hp.Choice("optimizer", values=["SGD", "Adam"]) optimizer = SGD(learning_rate=lr) if opt == "Adam": optimizer = Adam(learning_rate=lr) model.compile( loss="categorical_crossentropy", metrics=["accuracy"], optimizer=optimizer ) return model # Stop training if validation loss does not improve after 10 epochs es = EarlyStopping(monitor="val_loss", patience=20, restore_best_weights=True) print("Instantiating a bayesian optimization tuner object...") tuner = keras_tuner.BayesianOptimization( build_model, objective="val_accuracy", max_trials=10, seed=42, directory="/kaggle/working/temp/tb_logs_bayesian", ) # ### Best hyperparemeter searching print("Performing hyperparameter search...") tuner.search( x=input_X_train, y=output_Y_train, validation_data=(input_X_test, output_Y_test), batch_size=BATCH_SIZE, callbacks=[es], epochs=N_EPOCH, ) bestHP = tuner.get_best_hyperparameters(num_trials=1)[0] # identify the best hyperparameters print("Optimal number of filters in conv_1 layer: {}".format(bestHP.get("conv_1"))) print("Optimal number of filters in conv_2 layer: {}".format(bestHP.get("conv_2"))) print("Optimal number of filters in conv_3 layer: {}".format(bestHP.get("conv_3"))) print("Optimal number of units in dense layer: {}".format(bestHP.get("dense_units"))) print("Drop-out rate in 1st layer: {}".format(bestHP.get("dropout_1"))) print("Drop-out rate in 2nd layer: {}".format(bestHP.get("dropout_2"))) print("Drop-out rate in 3rd layer: {}".format(bestHP.get("dropout_3"))) print("Optimal learning rate: {:.4f}".format(bestHP.get("learning_rate"))) print("Better Optimizer: {}".format(bestHP.get("optimizer"))) tuner.results_summary() # ## GRID Search # convo_1, convo_2, convo_3, dense units, drop outs from the previous steps # Perform grid search on kernel size, learning rate & optimizers def build_model(kernel_param): inputs = keras.Input(shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)) x = inputs # first CONV => RELU => POOL layer set x = layers.Conv2D( 32, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="same", activation="relu", )(x) x = layers.Conv2D( 32, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # second CONV => RELU => POOL layer set x = layers.Conv2D( 96, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="same", activation="relu", )(x) x = layers.Conv2D( 96, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # third CONV => RELU => POOL layer set x = layers.Conv2D( 128, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="valid", activation="relu", )(x) x = layers.Conv2D( 128, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="valid", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # Fully 
connected layer x = layers.Flatten()(x) x = layers.Dense(384, activation="relu", kernel_initializer="he_uniform")(x) # Output layer x = layers.Dropout(0.2)(x) outputs = layers.Dense(units=N_CLASSES, activation="softmax")(x) model = keras.Model(inputs=inputs, outputs=outputs) return model seed = 7 tf.random.set_seed(seed) model = KerasClassifier( build_model, loss="categorical_crossentropy", epochs=N_EPOCH, batch_size=BATCH_SIZE, verbose=1, ) kernel_param = [(3, 3), (4, 4)] learning_rate = [1e-1, 1e-2, 1e-3] optimizer_param = ["SGD", "Adam"] param_grid = dict( model__kernel_param=kernel_param, optimizer__learning_rate=learning_rate, optimizer=optimizer_param, ) grid = GridSearchCV( estimator=model, param_grid=param_grid, cv=3, return_train_score=True, verbose=1 ) grid_result = grid.fit(input_X_train, output_Y_train) print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_)) means = grid_result.cv_results_["mean_test_score"] stds = grid_result.cv_results_["std_test_score"] params = grid_result.cv_results_["params"] for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % (mean, stdev, param)) results_df = pd.DataFrame(grid_result.cv_results_) results_df = results_df.sort_values(by=["rank_test_score"]) results_df pd.set_option("display.max_columns", None) results_df = results_df.set_index( results_df["params"].apply(lambda x: "_".join(str(val) for val in x.values())) ).rename_axis("kernel") results_df = results_df[ ["params", "rank_test_score", "mean_test_score", "mean_train_score"] ] results_df results_df.to_csv("/kaggle/working/results_df.csv") # ## Model training on best hyperparameters def build_model(): inputs = keras.Input(shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)) x = inputs # first CONV => RELU => POOL layer set x = layers.Conv2D( 32, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.Conv2D( 32, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # second CONV => RELU => POOL layer set x = layers.Conv2D( 96, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.Conv2D( 96, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # third CONV => RELU => POOL layer set x = layers.Conv2D( 128, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="valid", activation="relu", )(x) x = layers.Conv2D( 128, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="valid", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # Fully connected layer x = layers.Flatten()(x) x = layers.Dense(384, activation="relu", kernel_initializer="he_uniform")(x) # Output layer x = layers.Dropout(0.2)(x) outputs = layers.Dense(units=N_CLASSES, activation="softmax")(x) model = keras.Model(inputs=inputs, outputs=outputs) optimizer = Adam(learning_rate=0.001) model.compile( loss="categorical_crossentropy", metrics=["accuracy"], optimizer=optimizer ) return model # training/fitting of the complex DNN model model = build_model() history = model.fit( input_X_train, output_Y_train, batch_size=BATCH_SIZE, epochs=N_EPOCH, validation_split=VALIDATION_SPLIT, verbose=VERBOSE, ) # 
Testing score = model.evaluate( input_X_test, output_Y_test, batch_size=BATCH_SIZE, verbose=VERBOSE ) print("\nTest score/loss:", score[0]) print("Test accuracy:", score[1]) # list all data in history print(history.history.keys()) # summarize history for accuracy # plt.plot(mo) plt.plot(history.history["accuracy"]) plt.plot(history.history["val_accuracy"]) plt.title("model accuracy") plt.ylabel("accuracy") plt.xlabel("epoch") plt.legend(["train", "test"], loc="upper left") plt.show() # summarize history for loss plt.plot(history.history["loss"]) plt.plot(history.history["val_loss"]) plt.title("model loss") plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["train", "test"], loc="upper left") plt.show()
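# classification_report is imported at the top of the notebook but never used;
# a rough sketch of per-class precision/recall on the test set (assumes `model`,
# `input_X_test`, `output_y_test`, and `BATCH_SIZE` as defined above):
import numpy as np
from sklearn.metrics import classification_report

pred_probs = model.predict(input_X_test, batch_size=BATCH_SIZE)
pred_labels = np.argmax(pred_probs, axis=1)  # most probable class per image
print(classification_report(output_y_test.ravel(), pred_labels))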
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/161/129161223.ipynb
null
null
[{"Id": 129161223, "ScriptId": 38394661, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5849985, "CreationDate": "05/11/2023 12:53:24", "VersionNumber": 1.0, "Title": "CIFAR-100 with Deep CNN", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 314.0, "LinesInsertedFromPrevious": 6.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 308.0, "LinesInsertedFromFork": 6.0, "LinesDeletedFromFork": 5.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 308.0, "TotalVotes": 0}]
null
null
null
null
# importing of modules for CIFAR-10 CNN from tensorflow.keras.datasets import cifar10, cifar100 from tensorflow.keras import utils from tensorflow.keras.models import Sequential from tensorflow.keras.layers import ( Dense, Dropout, Activation, Flatten, SpatialDropout2D, ) from tensorflow.keras.layers import Conv2D, MaxPooling2D from tensorflow.keras.layers import BatchNormalization from tensorflow.keras.optimizers import SGD, Adam, RMSprop import tensorflow as tf from sklearn.model_selection import GridSearchCV from scikeras.wrappers import KerasClassifier from sklearn.model_selection import GridSearchCV import numpy as np import pandas as pd import keras_tuner from tensorflow import keras from tensorflow.keras import layers import matplotlib import matplotlib.pyplot as plt from IPython import display import pandas as pd from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras import backend as K from sklearn.metrics import classification_report import cv2 print("Libraries imported.") # training constants BATCH_SIZE = 64 N_EPOCH = 100 N_CLASSES = 100 VERBOSE = 1 VALIDATION_SPLIT = 0.2 IMG_CHANNELS = 3 IMG_ROWS = 32 IMG_COLS = 32 (input_X_train, output_y_train), (input_X_test, output_y_test) = cifar100.load_data() print("input_X_train shape:", input_X_train.shape) print(input_X_train.shape[0], "train samples") print(input_X_test.shape[0], "test samples") # convert to categorical output_Y_train = utils.to_categorical(output_y_train, N_CLASSES) output_Y_test = utils.to_categorical(output_y_test, N_CLASSES) # float and normalization input_X_train = input_X_train.astype("float32") input_X_test = input_X_test.astype("float32") input_X_train /= 255 input_X_test /= 255 plt.figure(figsize=(10, 10)) for i in range(4 * 4): plt.subplot(4, 4, i + 1) plt.imshow(input_X_train[i]) plt.axis("off") plt.title(output_y_train[i]) plt.show() # # Hyperparameter Tuning with Keras tuner # ## Using Bayesian optimizer # Take VGG as the baseline model def build_model(hp): inputs = keras.Input(shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)) x = inputs # first CONV => RELU => POOL layer set x = layers.Conv2D( hp.Int("conv_1", 32, 64, step=32), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.Conv2D( hp.Int("conv_1", 32, 64, step=32), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(hp.Float("dropout_1", 0, 0.4, step=0.2))(x) # second CONV => RELU => POOL layer set x = layers.Conv2D( hp.Int("conv_2", 64, 128, step=32), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.Conv2D( hp.Int("conv_2", 64, 128, step=32), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(hp.Float("dropout_2", 0, 0.4, step=0.2))(x) # third CONV => RELU => POOL layer set x = layers.Conv2D( hp.Int("conv_3", 128, 256, step=64), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="valid", activation="relu", )(x) x = layers.Conv2D( hp.Int("conv_3", 128, 256, step=64), kernel_initializer="he_uniform", kernel_size=(3, 3), padding="valid", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(hp.Float("dropout_3", 0, 0.4, step=0.2))(x) # Fully connected layer x 
= layers.Flatten()(x) x = layers.Dense( hp.Int("dense_units", min_value=128, max_value=384, step=128), activation="relu", kernel_initializer="he_uniform", )(x) # Output layer x = layers.Dropout(0.2)(x) outputs = layers.Dense(units=N_CLASSES, activation="softmax")(x) model = keras.Model(inputs=inputs, outputs=outputs) # initialize the learning rate choices and optimizer lr = hp.Choice("learning_rate", values=[1e-1, 1e-3]) opt = hp.Choice("optimizer", values=["SGD", "Adam"]) optimizer = SGD(learning_rate=lr) if opt == "Adam": optimizer = Adam(learning_rate=lr) model.compile( loss="categorical_crossentropy", metrics=["accuracy"], optimizer=optimizer ) return model # Stop training if validation loss does not improve after 10 epochs es = EarlyStopping(monitor="val_loss", patience=20, restore_best_weights=True) print("Instantiating a bayesian optimization tuner object...") tuner = keras_tuner.BayesianOptimization( build_model, objective="val_accuracy", max_trials=10, seed=42, directory="/kaggle/working/temp/tb_logs_bayesian", ) # ### Best hyperparemeter searching print("Performing hyperparameter search...") tuner.search( x=input_X_train, y=output_Y_train, validation_data=(input_X_test, output_Y_test), batch_size=BATCH_SIZE, callbacks=[es], epochs=N_EPOCH, ) bestHP = tuner.get_best_hyperparameters(num_trials=1)[0] # identify the best hyperparameters print("Optimal number of filters in conv_1 layer: {}".format(bestHP.get("conv_1"))) print("Optimal number of filters in conv_2 layer: {}".format(bestHP.get("conv_2"))) print("Optimal number of filters in conv_3 layer: {}".format(bestHP.get("conv_3"))) print("Optimal number of units in dense layer: {}".format(bestHP.get("dense_units"))) print("Drop-out rate in 1st layer: {}".format(bestHP.get("dropout_1"))) print("Drop-out rate in 2nd layer: {}".format(bestHP.get("dropout_2"))) print("Drop-out rate in 3rd layer: {}".format(bestHP.get("dropout_3"))) print("Optimal learning rate: {:.4f}".format(bestHP.get("learning_rate"))) print("Better Optimizer: {}".format(bestHP.get("optimizer"))) tuner.results_summary() # ## GRID Search # convo_1, convo_2, convo_3, dense units, drop outs from the previous steps # Perform grid search on kernel size, learning rate & optimizers def build_model(kernel_param): inputs = keras.Input(shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)) x = inputs # first CONV => RELU => POOL layer set x = layers.Conv2D( 32, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="same", activation="relu", )(x) x = layers.Conv2D( 32, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # second CONV => RELU => POOL layer set x = layers.Conv2D( 96, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="same", activation="relu", )(x) x = layers.Conv2D( 96, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # third CONV => RELU => POOL layer set x = layers.Conv2D( 128, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="valid", activation="relu", )(x) x = layers.Conv2D( 128, kernel_initializer="he_uniform", kernel_size=kernel_param, padding="valid", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # Fully 
connected layer x = layers.Flatten()(x) x = layers.Dense(384, activation="relu", kernel_initializer="he_uniform")(x) # Output layer x = layers.Dropout(0.2)(x) outputs = layers.Dense(units=N_CLASSES, activation="softmax")(x) model = keras.Model(inputs=inputs, outputs=outputs) return model seed = 7 tf.random.set_seed(seed) model = KerasClassifier( build_model, loss="categorical_crossentropy", epochs=N_EPOCH, batch_size=BATCH_SIZE, verbose=1, ) kernel_param = [(3, 3), (4, 4)] learning_rate = [1e-1, 1e-2, 1e-3] optimizer_param = ["SGD", "Adam"] param_grid = dict( model__kernel_param=kernel_param, optimizer__learning_rate=learning_rate, optimizer=optimizer_param, ) grid = GridSearchCV( estimator=model, param_grid=param_grid, cv=3, return_train_score=True, verbose=1 ) grid_result = grid.fit(input_X_train, output_Y_train) print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_)) means = grid_result.cv_results_["mean_test_score"] stds = grid_result.cv_results_["std_test_score"] params = grid_result.cv_results_["params"] for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % (mean, stdev, param)) results_df = pd.DataFrame(grid_result.cv_results_) results_df = results_df.sort_values(by=["rank_test_score"]) results_df pd.set_option("display.max_columns", None) results_df = results_df.set_index( results_df["params"].apply(lambda x: "_".join(str(val) for val in x.values())) ).rename_axis("kernel") results_df = results_df[ ["params", "rank_test_score", "mean_test_score", "mean_train_score"] ] results_df results_df.to_csv("/kaggle/working/results_df.csv") # ## Model training on best hyperparameters def build_model(): inputs = keras.Input(shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)) x = inputs # first CONV => RELU => POOL layer set x = layers.Conv2D( 32, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.Conv2D( 32, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # second CONV => RELU => POOL layer set x = layers.Conv2D( 96, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.Conv2D( 96, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="same", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # third CONV => RELU => POOL layer set x = layers.Conv2D( 128, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="valid", activation="relu", )(x) x = layers.Conv2D( 128, kernel_initializer="he_uniform", kernel_size=(3, 3), padding="valid", activation="relu", )(x) x = layers.BatchNormalization(axis=-1)(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Dropout(0.4)(x) # Fully connected layer x = layers.Flatten()(x) x = layers.Dense(384, activation="relu", kernel_initializer="he_uniform")(x) # Output layer x = layers.Dropout(0.2)(x) outputs = layers.Dense(units=N_CLASSES, activation="softmax")(x) model = keras.Model(inputs=inputs, outputs=outputs) optimizer = Adam(learning_rate=0.001) model.compile( loss="categorical_crossentropy", metrics=["accuracy"], optimizer=optimizer ) return model # training/fitting of the complex DNN model model = build_model() history = model.fit( input_X_train, output_Y_train, batch_size=BATCH_SIZE, epochs=N_EPOCH, validation_split=VALIDATION_SPLIT, verbose=VERBOSE, ) # 
Testing score = model.evaluate( input_X_test, output_Y_test, batch_size=BATCH_SIZE, verbose=VERBOSE ) print("\nTest score/loss:", score[0]) print("Test accuracy:", score[1]) # list all data in history print(history.history.keys()) # summarize history for accuracy # plt.plot(mo) plt.plot(history.history["accuracy"]) plt.plot(history.history["val_accuracy"]) plt.title("model accuracy") plt.ylabel("accuracy") plt.xlabel("epoch") plt.legend(["train", "test"], loc="upper left") plt.show() # summarize history for loss plt.plot(history.history["loss"]) plt.plot(history.history["val_loss"]) plt.title("model loss") plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["train", "test"], loc="upper left") plt.show()
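# A common companion metric for CIFAR-100 is top-5 accuracy; a minimal NumPy
# sketch, not part of the original notebook (assumes `model`, `input_X_test`,
# `output_y_test`, and `BATCH_SIZE` as defined above):
import numpy as np

probs = model.predict(input_X_test, batch_size=BATCH_SIZE)
top5 = np.argsort(probs, axis=1)[:, -5:]                     # indices of the 5 highest scores
hits = np.any(top5 == output_y_test.reshape(-1, 1), axis=1)  # true label among the top 5?
print("Top-5 test accuracy: %.4f" % hits.mean())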
false
0
4,030
0
4,030
4,030
129161219
<jupyter_start><jupyter_text>Car Data Kaggle dataset identifier: car-data <jupyter_script># # Feature Selection Methods compared using cars data: # - **featurewiz**: A Python library that automates feature selection using various algorithms and techniques. # - **feature-engine**: A Python library that provides tools for feature engineering and selection, such as transformers, selectors, and wrappers. # - **RFE**: Recursive Feature Elimination, a wrapper method that iteratively removes features based on the importance scores from a model. # - **Auto-Feat**: A Python library that automatically generates new features from existing ones using symbolic regression. # - **SFS**: Sequential Forward Selection, a wrapper method that sequentially adds features that improve the performance of a model. # - **SBS**: Sequential Backward Selection, a wrapper method that sequentially removes features that do not degrade the performance of a model. # - **SFFS**: Sequential Floating Forward Selection, a wrapper method that combines SFS and SBS to add and remove features dynamically. # - **SBFS**: Sequential Floating Backward Selection, a wrapper method that combines SBS and SFS to remove and add features dynamically. # - **Boruta**: A Python implementation of a feature selection algorithm based on random forests and permutation tests. # ## Make sure you downgrade XGBoost to an earlier version since new version API has changed and this notebook will give you an error! # # Make sure you restart the Kernel now after installing all these libraries. Otherwise you might get strange errors in Kaggle Notebooks! # # The data we will be using is a modified cars dataset that has 205 rows and 26 features # ### cars data courtesy: https://www.kaggle.com/datasets/goyalshalini93/car-data import numpy as np import pandas as pd import time from sklearn.model_selection import train_test_split start_time2 = time.time() import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) datapath = "/kaggle/input/car-data/CarPrice_Assignment.csv" # Set the target variable feats_list = {} ### 5-fold feature selection using each list of features from sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import accuracy_score, mean_squared_error feats_df = pd.DataFrame( feats_list, columns=["Method", "Features", "Mean RMSE"] ).set_index("Method") feats_df # Load the cars dataset and drop the car_ID feature since we don't want to use it! 
total_df = pd.read_csv(datapath).drop("car_ID", axis=1) print(total_df.shape) total_df.head() target = "price" preds = [x for x in list(total_df) if x not in [target]] from featurewiz import FeatureWiz start_time = time.time() # Run featurewiz to select the best features fwiz = FeatureWiz(corr_limit=0.9, verbose=0) # Convert categorical columns to numeric dfd = pd.get_dummies(total_df, drop_first=True) #### split into df and test so that we can set up a hold out set df, test = train_test_split(dfd, test_size=0.05, random_state=99) ### select features by transforming train df_transformed = fwiz.fit_transform(df.drop(target, axis=1), df[target]) features = fwiz.features test_transformed = test[features] # Display the selected features feats_df.loc["featurewiz", "Features"] = f"{features}" model = RandomForestRegressor(random_state=99) sco = cross_val_score( model, df_transformed, df[target], cv=5, n_jobs=-1, verbose=0, scoring="neg_root_mean_squared_error", ) * (-1) feats_df.loc["featurewiz", "Mean RMSE"] = sco.mean() model = RandomForestRegressor(random_state=99) model.fit(df_transformed, df[target]) sco2 = np.sqrt(mean_squared_error(test[target], model.predict(test_transformed))) feats_df.loc["featurewiz", "Test RMSE"] = sco2.mean() print("Time taken = %0.0f seconds" % (time.time() - start_time)) ## python from feature_engine.selection import SmartCorrelatedSelection start_time = time.time() # Convert categorical columns to numeric dfd = pd.get_dummies(total_df, drop_first=True) #### split into df and test so that we can set up a hold out set df, test = train_test_split(dfd, test_size=0.05, random_state=99) # Create a SmartCorrelatedSelection object sel = SmartCorrelatedSelection(variables=None, method="pearson", threshold=0.8) # Fit the selector to the data sel.fit(df.drop(target, axis=1), df[target]) # Transform the data df_transformed = sel.transform(df.drop(target, axis=1)) test_transformed = sel.transform(test.drop(target, axis=1)) # Display the selected features fe = df_transformed.columns.tolist() feats_df.loc["feature-engine", "Features"] = f"{fe}" model = RandomForestRegressor(random_state=99) sco = cross_val_score( model, df_transformed, df[target], cv=5, n_jobs=-1, verbose=0, scoring="neg_root_mean_squared_error", ) * (-1) feats_df.loc["feature-engine", "Mean RMSE"] = sco.mean() model = RandomForestRegressor(random_state=99) model.fit(df_transformed, df[target]) sco2 = np.sqrt(mean_squared_error(test[target], model.predict(test_transformed))) feats_df.loc["feature-engine", "Test RMSE"] = sco2.mean() print("Time taken = %0.0f seconds" % (time.time() - start_time)) from feature_engine.selection import RecursiveFeatureElimination from sklearn.linear_model import LinearRegression start_time = time.time() # Create a LinearRegression model model = LinearRegression() # Convert categorical columns to numeric dfd = pd.get_dummies(total_df, drop_first=True) #### split into df and test so that we can set up a hold out set df, test = train_test_split(dfd, test_size=0.05, random_state=99) # Create a RecursiveFeatureElimination selector sel = RecursiveFeatureElimination(estimator=model, variables=None, scoring="r2", cv=3) # Fit the selector to the data sel.fit(df.drop(target, axis=1), df[target]) # Transform the data df_transformed = sel.transform(df.drop(target, axis=1)) test_transformed = sel.transform(test.drop(target, axis=1)) # Display the selected features rfee = df_transformed.columns.tolist() feats_df.loc["RFE", "Features"] = f"{rfee}" model = RandomForestRegressor(random_state=99) 
sco = cross_val_score( model, df_transformed, df[target], cv=5, n_jobs=-1, verbose=0, scoring="neg_root_mean_squared_error", ) * (-1) feats_df.loc["RFE", "Mean RMSE"] = sco.mean() model = RandomForestRegressor(random_state=99) model.fit(df_transformed, df[target]) sco2 = np.sqrt(mean_squared_error(test[target], model.predict(test_transformed))) feats_df.loc["RFE", "Test RMSE"] = sco2.mean() print("Time taken = %0.0f seconds" % (time.time() - start_time)) from autofeat import AutoFeatRegressor start_time = time.time() # Convert categorical columns to numeric dfd = pd.get_dummies(total_df, drop_first=True) #### split into df and test so that we can set up a hold out set df, test = train_test_split(dfd, test_size=0.05, random_state=99) ### remove zero variance columns ### preds = [x for x in list(df) if x not in df.columns[df.var() == 0]] # Create an AutoFeatRegressor object ### If you increase the steps from 1 to 2 then 3, you can stop when they don't improve R_squared afreg = AutoFeatRegressor(verbose=1, feateng_steps=1) # Fit the regressor to the data df_transformed = afreg.fit_transform(df[preds].drop(target, axis=1), df[target]) test_transformed = afreg.transform(test[preds].drop(target, axis=1)) # Display the selected features afe = df_transformed.columns.tolist() feats_df.loc["Auto-Feat", "Features"] = f"{afe}" ### You need to convert the names to string - otherwise error! df_transformed.columns = df_transformed[afe].columns.astype(str) test_transformed.columns = test_transformed[afe].columns.astype(str) model = RandomForestRegressor(random_state=99) sco = cross_val_score( model, df_transformed, df[target], cv=5, n_jobs=-1, verbose=0, scoring="neg_root_mean_squared_error", ) * (-1) feats_df.loc["Auto-Feat", "Mean RMSE"] = sco.mean() model = RandomForestRegressor(random_state=99) model.fit(df_transformed, df[target]) sco2 = np.sqrt(mean_squared_error(test[target], model.predict(test_transformed))) feats_df.loc["Auto-Feat", "Test RMSE"] = sco2.mean() print("Time taken = %0.0f seconds" % (time.time() - start_time)) # ### It is unbelievable that AutoFeat generates 1,300 new features (if you set just feateng_steps=1) and yet could not beat featurewiz! But AutoFeat can be a useful method for small datasets. 
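# The zero-variance filter in the AutoFeat cell above is done by hand with
# df.var(); an equivalent sketch with scikit-learn's VarianceThreshold, not from
# the original notebook (assumes the one-hot encoded frame `df` and `target`
# defined above):
from sklearn.feature_selection import VarianceThreshold

X = df.drop(target, axis=1)
vt = VarianceThreshold(threshold=0.0)  # drop features whose variance is exactly zero
vt.fit(X)
kept_cols = X.columns[vt.get_support()].tolist()
print(f"{X.shape[1] - len(kept_cols)} zero-variance columns would be removed")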
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor from mlxtend.feature_selection import SequentialFeatureSelector start_time = time.time() # Convert categorical columns to numeric # Convert categorical columns to numeric dfd = pd.get_dummies(total_df, drop_first=True) #### split into df and test so that we can set up a hold out set df, test = train_test_split(dfd, test_size=0.05, random_state=99) preds = [x for x in list(dfd) if x not in [target]] # Define the estimator knn = KNeighborsRegressor(n_neighbors=5) # Sequential Forward Selection (SFS) sfs = SequentialFeatureSelector( knn, k_features="best", forward=True, floating=False, scoring="r2", cv=0 ) sfs.fit(df.drop(target, axis=1), df[target]) sfse = np.take(preds, sfs.k_feature_idx_).tolist() df_transformed = df[sfse] test_transformed = test[sfse] # Print the selected features # print('Sequential Forward Selection (SFS) features:', np.take(preds, sfs.k_feature_idx_).tolist()) feats_df.loc["SFS", "Features"] = f"{sfse}" model = RandomForestRegressor(random_state=99) sco = cross_val_score( model, dfd[sfse], dfd[target], cv=5, n_jobs=-1, verbose=0, scoring="neg_root_mean_squared_error", ) * (-1) feats_df.loc["SFS", "Mean RMSE"] = sco.mean() model = RandomForestRegressor(random_state=99) model.fit(df_transformed, df[target]) sco2 = np.sqrt(mean_squared_error(test[target], model.predict(test_transformed))) feats_df.loc["SFS", "Test RMSE"] = sco2.mean() print("Time taken = %0.0f seconds" % (time.time() - start_time)) start_time = time.time() # Convert categorical columns to numeric dfd = pd.get_dummies(total_df, drop_first=True) #### split into df and test so that we can set up a hold out set df, test = train_test_split(dfd, test_size=0.05, random_state=99) preds = [x for x in list(dfd) if x not in [target]] # Define the estimator knn = KNeighborsRegressor(n_neighbors=5) # Sequential Backward Selection (SBS) sbs = SequentialFeatureSelector( knn, k_features="best", forward=False, floating=False, scoring="r2", cv=0 ) sbs.fit(dfd.drop(target, axis=1), dfd[target]) sbse = np.take(preds, sbs.k_feature_idx_).tolist() df_transformed = df[sbse] test_transformed = test[sbse] # Print the selected features print("Sequential Backward Selection (SBS):", sbs.k_feature_idx_) feats_df.loc["SBS", "Features"] = f"{sbse}" model = RandomForestRegressor(random_state=99) sco = cross_val_score( model, dfd[sbse], dfd[target], cv=5, n_jobs=-1, verbose=0, scoring="neg_root_mean_squared_error", ) * (-1) feats_df.loc["SBS", "Mean RMSE"] = sco.mean() model = RandomForestRegressor(random_state=99) model.fit(df_transformed, df[target]) sco2 = np.sqrt(mean_squared_error(test[target], model.predict(test_transformed))) feats_df.loc["SBS", "Test RMSE"] = sco2.mean() print("Time taken = %0.0f seconds" % (time.time() - start_time)) # Define the estimator start_time = time.time() knn = KNeighborsRegressor(n_neighbors=5) # Sequential Forward Floating Selection (SFFS) sffs = SequentialFeatureSelector( knn, k_features="best", forward=True, floating=True, scoring="r2", cv=0 ) sffs.fit(dfd.drop(target, axis=1), dfd[target]) sffse = np.take(preds, sffs.k_feature_idx_).tolist() df_transformed = df[sffse] test_transformed = test[sffse] # Print the selected features print("Sequential Forward Floating Selection (SFFS):", sffs.k_feature_idx_) feats_df.loc["SFFS", "Features"] = f"{sffse}" model = RandomForestRegressor(random_state=99) sco = cross_val_score( model, dfd[sffse], dfd[target], cv=5, n_jobs=-1, verbose=0, scoring="neg_root_mean_squared_error", ) * (-1) 
feats_df.loc["SFFS", "Mean RMSE"] = sco.mean() model = RandomForestRegressor(random_state=99) model.fit(df_transformed, df[target]) sco2 = np.sqrt(mean_squared_error(test[target], model.predict(test_transformed))) feats_df.loc["SFFS", "Test RMSE"] = sco2.mean() print("Time taken = %0.0f seconds" % (time.time() - start_time)) # Define the estimator start_time = time.time() knn = KNeighborsRegressor(n_neighbors=5) # Sequential Backward Floating Selection (SBFS) sbfs = SequentialFeatureSelector( knn, k_features="best", forward=False, floating=True, scoring="r2", cv=0 ) sbfs.fit(dfd.drop(target, axis=1), dfd[target]) sbfse = np.take(preds, sbfs.k_feature_idx_).tolist() df_transformed = df[sbfse] test_transformed = test[sbfse] # Print the selected features print("Sequential Backward Floating Selection (SBFS):", sbfs.k_feature_idx_) feats_df.loc["SBFS", "Features"] = f"{sbfse}" model = RandomForestRegressor(random_state=99) sco = cross_val_score( model, dfd[sbfse], dfd[target], cv=5, n_jobs=-1, verbose=0, scoring="neg_root_mean_squared_error", ) * (-1) feats_df.loc["SBFS", "Mean RMSE"] = sco.mean() model = RandomForestRegressor(random_state=99) model.fit(df_transformed, df[target]) sco2 = np.sqrt(mean_squared_error(test[target], model.predict(test_transformed))) feats_df.loc["SBFS", "Test RMSE"] = sco2.mean() print("Time taken = %0.0f seconds" % (time.time() - start_time)) from boruta import BorutaPy from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression import pandas as pd import time start_time = time.time() # Convert categorical columns to numeric dfd = pd.get_dummies(total_df, drop_first=True) #### split into df and test so that we can set up a hold out set df, test = train_test_split(dfd, test_size=0.05, random_state=99) rfc = RandomForestRegressor(n_jobs=-1, random_state=42) boruta_selector = BorutaPy( rfc, n_estimators="auto", random_state=1, two_step=False, verbose=0, ) # NOTE BorutaPy accepts numpy arrays only! boruta_selector.fit(dfd.drop(target, axis=1).values, dfd[target].values) boru = dfd.drop(target, axis=1).columns[boruta_selector.support_].tolist() df_transformed = df[boru] test_transformed = test[boru] # Print the selected features # print('Boruta Selection (Boru):', boru) feats_df.loc["Boruta", "Features"] = f"{boru}" model = RandomForestRegressor(random_state=99) sco = cross_val_score( model, dfd[boru], dfd[target], cv=5, n_jobs=-1, verbose=0, scoring="neg_root_mean_squared_error", ) * (-1) feats_df.loc["Boruta", "Mean RMSE"] = sco.mean() model = RandomForestRegressor(random_state=99) model.fit(df_transformed, df[target]) sco2 = np.sqrt(mean_squared_error(test[target], model.predict(test_transformed))) feats_df.loc["Boruta", "Test RMSE"] = sco2.mean() print("Time taken = %0.0f seconds" % (time.time() - start_time)) # ### Let's compute the average RMSE for each method over both Test set and CV results print( "Overall Time taken for all methods = %0.0f seconds" % (time.time() - start_time2) ) feats_df["average per method"] = feats_df[["Mean RMSE", "Test RMSE"]].mean(axis=1) feats_df["Num. Features Selected"] = feats_df["Features"].apply(lambda x: len(eval(x))) feats_df.sort_values("average per method").reset_index()
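# ### The final cell above parses the stored feature lists with eval; a safer equivalent (a suggested variant, not the original code) uses ast.literal_eval, which parses the list literals without executing arbitrary code. It assumes the feats_df built above.
import ast

feats_df["Num. Features Selected"] = feats_df["Features"].apply(
    lambda x: len(ast.literal_eval(x))  # parse the stored list string safely
)
feats_df.sort_values("average per method").reset_index()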
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/161/129161219.ipynb
car-data
goyalshalini93
[{"Id": 129161219, "ScriptId": 38393941, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 631309, "CreationDate": "05/11/2023 12:53:23", "VersionNumber": 1.0, "Title": "AICamp_Special_Feature_Selection_Methods", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 365.0, "LinesInsertedFromPrevious": 365.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184963474, "KernelVersionId": 129161219, "SourceDatasetVersionId": 395113}]
[{"Id": 395113, "DatasetId": 175168, "DatasourceVersionId": 410111, "CreatorUserId": 1664460, "LicenseName": "Unknown", "CreationDate": "04/25/2019 18:24:13", "VersionNumber": 1.0, "Title": "Car Data", "Slug": "car-data", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 41601.0, "TotalUncompressedBytes": 41601.0}]
[{"Id": 175168, "CreatorUserId": 1664460, "OwnerUserId": 1664460.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 395113.0, "CurrentDatasourceVersionId": 410111.0, "ForumId": 185913, "Type": 2, "CreationDate": "04/25/2019 18:24:13", "LastActivityDate": "04/25/2019", "TotalViews": 61971, "TotalDownloads": 28467, "TotalVotes": 84, "TotalKernels": 79}]
[{"Id": 1664460, "UserName": "goyalshalini93", "DisplayName": "Shalini Goyal", "RegisterDate": "02/24/2018", "PerformanceTier": 1}]
false
0
4,890
0
4,908
4,890
129161044
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session from sklearn.ensemble import ( RandomForestClassifier, BaggingClassifier, GradientBoostingClassifier, ) from xgboost import XGBClassifier from sklearn.ensemble import VotingClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, accuracy_score from imblearn.under_sampling import NearMiss from imblearn.over_sampling import RandomOverSampler from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import GridSearchCV df = pd.read_csv("/kaggle/input/nasadsa/nasa.csv") df.shape df.info() df[["Orbit Determination Date", "Close Approach Date"]] df = df.drop(["Name", "Neo Reference ID"], axis=1) df = df.drop(["Orbit Determination Date", "Close Approach Date"], axis=1) df.info() df = df.drop(["Equinox", "Orbiting Body"], axis=1) df.columns = df.columns.str.replace(" ", "_") df plt.figure(figsize=(22, 8)) sns.heatmap(data=df.corr(), annot=True) df.columns c = [ "Est_Dia_in_M(min)", "Est_Dia_in_M(max)", "Est_Dia_in_Miles(min)", "Est_Dia_in_Miles(max)", "Est_Dia_in_Feet(min)", "Est_Dia_in_Feet(max)", "Relative_Velocity_km_per_hr", "Miles_per_hour", "Miss_Dist.(Astronomical)", "Miss_Dist.(lunar)", "Miss_Dist.(kilometers)", "Miss_Dist.(miles)", ] df = df.drop(labels=c, axis=1) a = df["Hazardous"].value_counts() a[0] / a.sum(), a[1] / a.sum() df = pd.get_dummies(data=df, columns=["Hazardous"], drop_first=True) df plt.figure(figsize=(5, 7)) sns.heatmap( data=df.corr()[["Hazardous_True"]].sort_values( by="Hazardous_True", ascending=False ), annot=True, ) df.Hazardous_True.value_counts() from sklearn.preprocessing import RobustScaler X = df.drop(["Hazardous_True"], axis=1) y = df["Hazardous_True"] scaler = RobustScaler() X_columns = X.columns X = scaler.fit_transform(X) X = pd.DataFrame(X, columns=X_columns) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, stratify=y, random_state=123 ) xgbmodel = XGBClassifier(use_label_encoder=False) xgbmodel.fit(X_train, y_train) xgb_ypredict = xgbmodel.predict(X_test) xgb_ypredict_train = xgbmodel.predict(X_train) print(classification_report(y_train, xgb_ypredict_train)) print(classification_report(y_test, xgb_ypredict)) Importance = pd.DataFrame( {"Features": xgbmodel.feature_importances_}, index=X_train.columns ) data = Importance.sort_values(by="Features", axis=0, ascending=True) fig = plt.figure(figsize=(10, 6)) ax = fig.add_axes([0, 0, 1, 1]) ax.barh(data.index[-10:], data.Features[-10:], color="purple") plt.show() pd.DataFrame({"Features": xgbmodel.feature_importances_}, index=X.columns).sort_values( by="Features", ascending=False )
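# ### RandomizedSearchCV is imported above but never used; this is a minimal sketch (not part of the original notebook) of how it could be wired up to tune the XGBClassifier on the same split. The parameter grid, n_iter and f1 scoring are illustrative choices; X_train and y_train come from the train_test_split cell above.
from xgboost import XGBClassifier
from sklearn.model_selection import RandomizedSearchCV

param_dist = {
    "n_estimators": [100, 200, 400],
    "max_depth": [3, 4, 6, 8],
    "learning_rate": [0.01, 0.05, 0.1, 0.3],
    "subsample": [0.6, 0.8, 1.0],
}
search = RandomizedSearchCV(
    XGBClassifier(use_label_encoder=False, eval_metric="logloss"),
    param_distributions=param_dist,
    n_iter=20,          # number of random parameter combinations to try
    scoring="f1",       # the hazardous class is the minority, so F1 is more informative than accuracy
    cv=5,
    random_state=123,
    n_jobs=-1,
)
search.fit(X_train, y_train)
print(search.best_params_)
print(search.best_score_)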
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/161/129161044.ipynb
null
null
[{"Id": 129161044, "ScriptId": 36095983, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11986220, "CreationDate": "05/11/2023 12:52:03", "VersionNumber": 1.0, "Title": "Hyperparameter mix", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 108.0, "LinesInsertedFromPrevious": 108.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,182
0
1,182
1,182
129161351
<jupyter_start><jupyter_text>HR Analytics Kaggle dataset identifier: hr-analytics <jupyter_code>import pandas as pd df = pd.read_csv('hr-analytics/HR_comma_sep.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 14999 entries, 0 to 14998 Data columns (total 10 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 satisfaction_level 14999 non-null float64 1 last_evaluation 14999 non-null float64 2 number_project 14999 non-null int64 3 average_montly_hours 14999 non-null int64 4 time_spend_company 14999 non-null int64 5 Work_accident 14999 non-null int64 6 left 14999 non-null int64 7 promotion_last_5years 14999 non-null int64 8 Department 14999 non-null object 9 salary 14999 non-null object dtypes: float64(2), int64(6), object(2) memory usage: 1.1+ MB <jupyter_text>Examples: { "satisfaction_level": 0.38, "last_evaluation": 0.53, "number_project": 2, "average_montly_hours": 157, "time_spend_company": 3, "Work_accident": 0, "left": 1, "promotion_last_5years": 0, "Department": "sales", "salary": "low" } { "satisfaction_level": 0.8, "last_evaluation": 0.86, "number_project": 5, "average_montly_hours": 262, "time_spend_company": 6, "Work_accident": 0, "left": 1, "promotion_last_5years": 0, "Department": "sales", "salary": "medium" } { "satisfaction_level": 0.11, "last_evaluation": 0.88, "number_project": 7, "average_montly_hours": 272, "time_spend_company": 4, "Work_accident": 0, "left": 1, "promotion_last_5years": 0, "Department": "sales", "salary": "medium" } { "satisfaction_level": 0.72, "last_evaluation": 0.87, "number_project": 5, "average_montly_hours": 223, "time_spend_company": 5, "Work_accident": 0, "left": 1, "promotion_last_5years": 0, "Department": "sales", "salary": "low" } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import matplotlib.pyplot as plt from sklearn import linear_model from sklearn.preprocessing import LabelEncoder import math print(os.getcwd()) print(os.listdir()) df = pd.read_csv("/kaggle/input/hr-analytics/HR_comma_sep.csv") df # **DATA EXPLORATION AND VISUALISATION** left = df[df.left == 1] left.shape retained = df[df.left == 0] retained.shape df.groupby("left").mean() # **We can see that the employees who left the company are the ones with heavier workloads and fewer promotions.** pd.crosstab(df.salary, df.left).plot(kind="bar") pd.crosstab(df.Department, df.left).plot(kind="bar") df2 = df[ ["satisfaction_level", "average_montly_hours", "promotion_last_5years", "salary"] ] df2.head() dummies = pd.get_dummies(df2["salary"], dtype=int) dummies df_dummy = pd.concat([df2, dummies], axis="columns") df_dummy df_dummy.drop(["salary"], axis="columns", inplace=True) df_dummy X = df_dummy y = df.left from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.3) from sklearn.linear_model import LogisticRegression model = LogisticRegression() model.fit(X_train, y_train) model.predict(X_test) model.score(X_test, y_test)
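# ### A small follow-up sketch (not part of the original notebook): accuracy alone can look flattering here because only about 24% of employees left, so a per-class report is more informative. It assumes model, X_test and y_test from the cells above.
from sklearn.metrics import classification_report, confusion_matrix

y_pred = model.predict(X_test)
print(confusion_matrix(y_test, y_pred))   # rows: actual stayed/left, columns: predicted
print(classification_report(y_test, y_pred))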
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/161/129161351.ipynb
hr-analytics
giripujar
[{"Id": 129161351, "ScriptId": 38394179, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15053313, "CreationDate": "05/11/2023 12:54:23", "VersionNumber": 1.0, "Title": "EDA and employee retention using logistic regressi", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 77.0, "LinesInsertedFromPrevious": 77.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 184963762, "KernelVersionId": 129161351, "SourceDatasetVersionId": 15488}]
[{"Id": 15488, "DatasetId": 11142, "DatasourceVersionId": 15488, "CreatorUserId": 1045187, "LicenseName": "CC0: Public Domain", "CreationDate": "01/28/2018 01:12:19", "VersionNumber": 1.0, "Title": "HR Analytics", "Slug": "hr-analytics", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 566785.0, "TotalUncompressedBytes": 566785.0}]
[{"Id": 11142, "CreatorUserId": 1045187, "OwnerUserId": 1045187.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 15488.0, "CurrentDatasourceVersionId": 15488.0, "ForumId": 18532, "Type": 2, "CreationDate": "01/28/2018 01:12:19", "LastActivityDate": "02/05/2018", "TotalViews": 95322, "TotalDownloads": 20840, "TotalVotes": 213, "TotalKernels": 145}]
[{"Id": 1045187, "UserName": "giripujar", "DisplayName": "Giri Pujar", "RegisterDate": "04/27/2017", "PerformanceTier": 0}]
[{"hr-analytics/HR_comma_sep.csv": {"column_names": "[\"satisfaction_level\", \"last_evaluation\", \"number_project\", \"average_montly_hours\", \"time_spend_company\", \"Work_accident\", \"left\", \"promotion_last_5years\", \"Department\", \"salary\"]", "column_data_types": "{\"satisfaction_level\": \"float64\", \"last_evaluation\": \"float64\", \"number_project\": \"int64\", \"average_montly_hours\": \"int64\", \"time_spend_company\": \"int64\", \"Work_accident\": \"int64\", \"left\": \"int64\", \"promotion_last_5years\": \"int64\", \"Department\": \"object\", \"salary\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 14999 entries, 0 to 14998\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 satisfaction_level 14999 non-null float64\n 1 last_evaluation 14999 non-null float64\n 2 number_project 14999 non-null int64 \n 3 average_montly_hours 14999 non-null int64 \n 4 time_spend_company 14999 non-null int64 \n 5 Work_accident 14999 non-null int64 \n 6 left 14999 non-null int64 \n 7 promotion_last_5years 14999 non-null int64 \n 8 Department 14999 non-null object \n 9 salary 14999 non-null object \ndtypes: float64(2), int64(6), object(2)\nmemory usage: 1.1+ MB\n", "summary": "{\"satisfaction_level\": {\"count\": 14999.0, \"mean\": 0.6128335222348156, \"std\": 0.24863065106114257, \"min\": 0.09, \"25%\": 0.44, \"50%\": 0.64, \"75%\": 0.82, \"max\": 1.0}, \"last_evaluation\": {\"count\": 14999.0, \"mean\": 0.7161017401160078, \"std\": 0.17116911062327533, \"min\": 0.36, \"25%\": 0.56, \"50%\": 0.72, \"75%\": 0.87, \"max\": 1.0}, \"number_project\": {\"count\": 14999.0, \"mean\": 3.80305353690246, \"std\": 1.2325923553183522, \"min\": 2.0, \"25%\": 3.0, \"50%\": 4.0, \"75%\": 5.0, \"max\": 7.0}, \"average_montly_hours\": {\"count\": 14999.0, \"mean\": 201.0503366891126, \"std\": 49.94309937128408, \"min\": 96.0, \"25%\": 156.0, \"50%\": 200.0, \"75%\": 245.0, \"max\": 310.0}, \"time_spend_company\": {\"count\": 14999.0, \"mean\": 3.498233215547703, \"std\": 1.4601362305354812, \"min\": 2.0, \"25%\": 3.0, \"50%\": 3.0, \"75%\": 4.0, \"max\": 10.0}, \"Work_accident\": {\"count\": 14999.0, \"mean\": 0.1446096406427095, \"std\": 0.35171855238017985, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"left\": {\"count\": 14999.0, \"mean\": 0.2380825388359224, \"std\": 0.4259240993802994, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"promotion_last_5years\": {\"count\": 14999.0, \"mean\": 0.021268084538969265, \"std\": 0.14428146457858232, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}}", "examples": "{\"satisfaction_level\":{\"0\":0.38,\"1\":0.8,\"2\":0.11,\"3\":0.72},\"last_evaluation\":{\"0\":0.53,\"1\":0.86,\"2\":0.88,\"3\":0.87},\"number_project\":{\"0\":2,\"1\":5,\"2\":7,\"3\":5},\"average_montly_hours\":{\"0\":157,\"1\":262,\"2\":272,\"3\":223},\"time_spend_company\":{\"0\":3,\"1\":6,\"2\":4,\"3\":5},\"Work_accident\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"left\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\"promotion_last_5years\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"Department\":{\"0\":\"sales\",\"1\":\"sales\",\"2\":\"sales\",\"3\":\"sales\"},\"salary\":{\"0\":\"low\",\"1\":\"medium\",\"2\":\"medium\",\"3\":\"low\"}}"}}]
true
1
<start_data_description><data_path>hr-analytics/HR_comma_sep.csv: <column_names> ['satisfaction_level', 'last_evaluation', 'number_project', 'average_montly_hours', 'time_spend_company', 'Work_accident', 'left', 'promotion_last_5years', 'Department', 'salary'] <column_types> {'satisfaction_level': 'float64', 'last_evaluation': 'float64', 'number_project': 'int64', 'average_montly_hours': 'int64', 'time_spend_company': 'int64', 'Work_accident': 'int64', 'left': 'int64', 'promotion_last_5years': 'int64', 'Department': 'object', 'salary': 'object'} <dataframe_Summary> {'satisfaction_level': {'count': 14999.0, 'mean': 0.6128335222348156, 'std': 0.24863065106114257, 'min': 0.09, '25%': 0.44, '50%': 0.64, '75%': 0.82, 'max': 1.0}, 'last_evaluation': {'count': 14999.0, 'mean': 0.7161017401160078, 'std': 0.17116911062327533, 'min': 0.36, '25%': 0.56, '50%': 0.72, '75%': 0.87, 'max': 1.0}, 'number_project': {'count': 14999.0, 'mean': 3.80305353690246, 'std': 1.2325923553183522, 'min': 2.0, '25%': 3.0, '50%': 4.0, '75%': 5.0, 'max': 7.0}, 'average_montly_hours': {'count': 14999.0, 'mean': 201.0503366891126, 'std': 49.94309937128408, 'min': 96.0, '25%': 156.0, '50%': 200.0, '75%': 245.0, 'max': 310.0}, 'time_spend_company': {'count': 14999.0, 'mean': 3.498233215547703, 'std': 1.4601362305354812, 'min': 2.0, '25%': 3.0, '50%': 3.0, '75%': 4.0, 'max': 10.0}, 'Work_accident': {'count': 14999.0, 'mean': 0.1446096406427095, 'std': 0.35171855238017985, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'left': {'count': 14999.0, 'mean': 0.2380825388359224, 'std': 0.4259240993802994, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'promotion_last_5years': {'count': 14999.0, 'mean': 0.021268084538969265, 'std': 0.14428146457858232, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}} <dataframe_info> RangeIndex: 14999 entries, 0 to 14998 Data columns (total 10 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 satisfaction_level 14999 non-null float64 1 last_evaluation 14999 non-null float64 2 number_project 14999 non-null int64 3 average_montly_hours 14999 non-null int64 4 time_spend_company 14999 non-null int64 5 Work_accident 14999 non-null int64 6 left 14999 non-null int64 7 promotion_last_5years 14999 non-null int64 8 Department 14999 non-null object 9 salary 14999 non-null object dtypes: float64(2), int64(6), object(2) memory usage: 1.1+ MB <some_examples> {'satisfaction_level': {'0': 0.38, '1': 0.8, '2': 0.11, '3': 0.72}, 'last_evaluation': {'0': 0.53, '1': 0.86, '2': 0.88, '3': 0.87}, 'number_project': {'0': 2, '1': 5, '2': 7, '3': 5}, 'average_montly_hours': {'0': 157, '1': 262, '2': 272, '3': 223}, 'time_spend_company': {'0': 3, '1': 6, '2': 4, '3': 5}, 'Work_accident': {'0': 0, '1': 0, '2': 0, '3': 0}, 'left': {'0': 1, '1': 1, '2': 1, '3': 1}, 'promotion_last_5years': {'0': 0, '1': 0, '2': 0, '3': 0}, 'Department': {'0': 'sales', '1': 'sales', '2': 'sales', '3': 'sales'}, 'salary': {'0': 'low', '1': 'medium', '2': 'medium', '3': 'low'}} <end_description>
600
2
1,369
600
129161247
<jupyter_start><jupyter_text>UCI Bag Of Words Kaggle dataset identifier: uci-bag-of-words <jupyter_script># 1. Bag of Words dataset # - a. Combine three of the datasets into a common corpus # import math import random import string import time from typing import List from collections import Counter from multiprocessing import Pool import numpy as np import pandas as pd from scipy import sparse from tqdm import tqdm import nltk from nltk.corpus import stopwords from nltk.tokenize import word_tokenize, sent_tokenize from nltk.lm import MLE from nltk.lm.preprocessing import padded_everygram_pipeline from nltk.tokenize.treebank import TreebankWordDetokenizer nltk.download("punkt") nltk.download("omw-1.4") nltk.download("stopwords") import regex as re from textblob import TextBlob from sklearn.feature_extraction.text import CountVectorizer def read_files( path: str, vocab_path: str, chunk_size: int, iterator: bool ) -> pd.DataFrame: dtype = {0: np.int32, 1: np.int32, 2: np.int16} header = None skiprows = 3 nrows = 1000000 tp = pd.read_csv( path, sep=" ", header=header, skiprows=skiprows, dtype=dtype, iterator=True, chunksize=chunk_size, nrows=nrows, ) df = pd.concat(tp, ignore_index=True) df.columns = ["document", "word_id", "count"] vocab = pd.read_csv(vocab_path, sep=" ", header=header) vocab["word_id"] = range(1, len(vocab) + 1) vocab.columns = ["word", "word_id"] df = pd.merge(df, vocab, on="word_id", how="inner")[["document", "word", "count"]] return df enron = read_files( "../input/uci-bag-of-words/docword.enron.txt", "../input/uci-bag-of-words/vocab.enron.txt", 1000, False, ) kos = read_files( "../input/uci-bag-of-words/docword.kos.txt", "../input/uci-bag-of-words/vocab.kos.txt", 1000, False, ) nips = read_files( "../input/uci-bag-of-words/docword.nips.txt", "../input/uci-bag-of-words/vocab.nips.txt", 1000, False, ) nytimes = read_files( "../input/uci-bag-of-words/docword.nytimes.txt", "../input/uci-bag-of-words/vocab.nytimes.txt", 1000, False, ) nytimes.dropna(inplace=True) enron.dropna(inplace=True) nips.dropna(inplace=True) kos.dropna(inplace=True) def get_random_sample(df: pd.DataFrame, sample_size: int) -> pd.DataFrame: max_document_id = df.document.max() random_docs = np.random.randint(1, max_document_id, sample_size) return df[df.document.isin(set(random_docs))] enron_small = get_random_sample(enron, 6000) nytimes_small = get_random_sample(nytimes, 6000) # Compute maximum document ID max_doc_id = max(enron_small["document"]) # Increment document IDs of KOS and NIPS datasets kos["document"] += max_doc_id max_doc_id = max(kos["document"]) nips["document"] += max_doc_id # Increment document IDs of NYTimes dataset nytimes_small["document"] += max(nips["document"]) # Combine all datasets corpus = pd.concat([enron_small, kos, nytimes_small, nips]) corpus # Calculate the number of unique words, documents, and total number of entries in the combined corpus. len(corpus.word.unique()), len(corpus["document"].unique()), len(corpus) # # **- b. Create a word-document matrix from this corpus** # pivot_table_params = { "index": "word", "columns": "document", "values": "count", "fill_value": 0, } td_matrix = pd.pivot_table(corpus, **pivot_table_params) td_matrix.head() # creates a pivot table td_matrix from the corpus DataFrame where the rows correspond to unique words, # the columns correspond to unique documents, and the values correspond to the count of each word in each document # # C. 
Using SKLearn, find the truncated singular value decomposition of this matrix, retaining the first 100 dimensions # * i. Are these dimensions interpretable? # * ii. What does dimension 1 represent? # * iii. What do the top 10 dimensions represent? from sklearn.decomposition import TruncatedSVD # Create an instance of TruncatedSVD with specified parameters svd = TruncatedSVD(n_components=100, n_iter=8, random_state=33) # Fit the TruncatedSVD model on the term-document matrix svd.fit(td_matrix) # Transform the term-document matrix using the fitted TruncatedSVD model transformed_td = pd.DataFrame(svd.transform(td_matrix), index=td_matrix.index) print(svd.explained_variance_ratio_) print(svd.singular_values_) transformed_td # 1. The resulting dimensions are not directly interpretable, since the original features are represented as linear combinations of the new dimensions. However, the first dimension is generally considered to be the most important, as it captures the most variance in the data. # 2. The first dimension represents the most important component, and its values for each word can be interpreted relative to the values of other words. For instance, if the value for word "aaa" is lower than that of "aaai," it could mean that "aaa" appeared less frequently in the most important documents. # 3. As mentioned earlier, these dimensions capture the most variance in the data and are likely to correspond to the most important features. # # D: Determine the average cosine similarity between documents within each corpus. Next, determine the average cosine similarity between documents across corpora. from sklearn.metrics.pairwise import cosine_similarity matrices = { "enron_small": enron_small, "nips": nips, "kos": kos, "nytimes_small": nytimes_small, } similarities = {} for name, matrix in matrices.items(): matrix = matrix.pivot_table( index="word", columns="document", values="count", fill_value=0 ) similarities[name + "_similarity"] = cosine_similarity(matrix.T, matrix.T) for name, sim_matrix in similarities.items(): print(name + ":") print(sim_matrix) print("\n") avg_enron_similarity = np.array( ( similarities["enron_small_similarity"].sum(1) - np.diag(similarities["enron_small_similarity"]) ) / (similarities["enron_small_similarity"].shape[1] - 1) ).mean() avg_nips_similarity = np.array( (similarities["nips_similarity"].sum(1) - np.diag(similarities["nips_similarity"])) / (similarities["nips_similarity"].shape[1] - 1) ).mean() avg_kos_similarity = np.array( (similarities["kos_similarity"].sum(1) - np.diag(similarities["kos_similarity"])) / (similarities["kos_similarity"].shape[1] - 1) ).mean() avg_ny_similarity = np.array( ( similarities["nytimes_small_similarity"].sum(1) - np.diag(similarities["nytimes_small_similarity"]) ) / (similarities["nytimes_small_similarity"].shape[1] - 1) ).mean() print(f"Average similarity Enron corpus: {avg_enron_similarity}") print(f"Average similarity Nips corpus: {avg_nips_similarity}") print(f"Average similarity Kos corpus: {avg_kos_similarity}") print(f"Average similarity NYTimes corpus: {avg_ny_similarity}") from itertools import combinations for pair in combinations(similarities.items(), 2): similarity = cosine_similarity(pair[0][1][:100].T, pair[1][1][:100].T) print(f"Similarity between {pair[0][0]} and {pair[1][0]}: {similarity[0][0]}") # # e. Does LSA work well as a tool for clustering corpora? 
# We think so, since LSA can produce topics and cluster documents within each corpus, and the document and corpus similarities it calculates are meaningful in this context. # # f. Try to use PCA instead of LSA. What are your results? Are they replaceable? Can you achieve the same results using PCA? Why? # from sklearn.decomposition import PCA # Enron corpus pca = PCA(n_components=100) enron_pca = pca.fit_transform(similarities["enron_small_similarity"].T) enron_centroids = pca.singular_values_ # NIPS corpus pca = PCA(n_components=100) nips_pca = pca.fit_transform(similarities["nips_similarity"].T) nips_centroids = pca.singular_values_ # KOS corpus pca = PCA(n_components=100) kos_pca = pca.fit_transform(similarities["kos_similarity"].T) kos_centroids = pca.singular_values_ # NYTimes corpus pca = PCA(n_components=100) nytimes_pca = pca.fit_transform(similarities["nytimes_small_similarity"].T) nytimes_centroids = pca.singular_values_ # Similarity calculations enron_similarity = cosine_similarity(enron_pca.T, enron_pca.T) nips_similarity = cosine_similarity(nips_pca.T, nips_pca.T) kos_similarity = cosine_similarity(kos_pca.T, kos_pca.T) nytimes_similarity = cosine_similarity(nytimes_pca.T, nytimes_pca.T) # Average similarity between documents in each corpus enron_avg_similarity = np.array( (enron_similarity.sum(1) - np.diag(enron_similarity)) / (enron_similarity.shape[1] - 1) ).mean() nips_avg_similarity = np.array( (nips_similarity.sum(1) - np.diag(nips_similarity)) / (nips_similarity.shape[1] - 1) ).mean() kos_avg_similarity = np.array( (kos_similarity.sum(1) - np.diag(kos_similarity)) / (kos_similarity.shape[1] - 1) ).mean() nytimes_avg_similarity = np.array( (nytimes_similarity.sum(1) - np.diag(nytimes_similarity)) / (nytimes_similarity.shape[1] - 1) ).mean() print(f"Average similarity between documents in Enron corpus: {enron_avg_similarity}") print(f"Average similarity between documents in Nips corpus: {nips_avg_similarity}") print(f"Average similarity between documents in KOS corpus: {kos_avg_similarity}") print( f"Average similarity between documents in NYTimes corpus: {nytimes_avg_similarity}" ) # Similarity between centroids enron_nips_similarity = cosine_similarity( enron_centroids.reshape(1, -1), nips_centroids.reshape(1, -1) ) enron_kos_similarity = cosine_similarity( enron_centroids.reshape(1, -1), kos_centroids.reshape(1, -1) ) enron_nytimes_similarity = cosine_similarity( enron_centroids.reshape(1, -1), nytimes_centroids.reshape(1, -1) ) nips_kos_similarity = cosine_similarity( nips_centroids.reshape(1, -1), kos_centroids.reshape(1, -1) ) nips_nytimes_similarity = cosine_similarity( nips_centroids.reshape(1, -1), nytimes_centroids.reshape(1, -1) ) kos_nytimes_similarity = cosine_similarity( kos_centroids.reshape(1, -1), nytimes_centroids.reshape(1, -1) ) print(f"Similarity between Enron and NIPS centroids: {enron_nips_similarity[0][0]}") print(f"Similarity between Enron and KOS centroids: {enron_kos_similarity[0][0]}") print( f"Similarity between Enron and NYTimes centroids: {enron_nytimes_similarity[0][0]}" ) print(f"Similarity between NIPS and KOS centroids: {nips_kos_similarity[0][0]}") print(f"Similarity between NIPS and NYTimes centroids: {nips_nytimes_similarity[0][0]}")
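# ### A compact check (not part of the original notebook) of the PCA-versus-LSA question above: PCA is equivalent to an SVD of the mean-centred matrix, while TruncatedSVD (LSA) factorises the raw counts, so the two only agree once the data are centred first. It reuses td_matrix from part (b); the choice of 5 components is arbitrary.
import numpy as np
from sklearn.decomposition import PCA, TruncatedSVD

X_docs = td_matrix.T.values  # documents as rows, words as columns
svd_raw = TruncatedSVD(n_components=5, random_state=33).fit(X_docs)
pca = PCA(n_components=5, random_state=33).fit(X_docs)
svd_centred = TruncatedSVD(n_components=5, random_state=33).fit(X_docs - X_docs.mean(axis=0))

def component_cosines(a, b):
    # absolute cosine between corresponding components; signs of singular vectors are arbitrary
    return np.abs((a * b).sum(axis=1)) / (np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1))

print("raw SVD vs PCA:    ", component_cosines(svd_raw.components_, pca.components_).round(2))
print("centred SVD vs PCA:", component_cosines(svd_centred.components_, pca.components_).round(2))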
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/161/129161247.ipynb
uci-bag-of-words
aslanovmustafa
[{"Id": 129161247, "ScriptId": 38394842, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2358759, "CreationDate": "05/11/2023 12:53:36", "VersionNumber": 1.0, "Title": "notebookb5ee88a66d", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 236.0, "LinesInsertedFromPrevious": 236.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 184963519, "KernelVersionId": 129161247, "SourceDatasetVersionId": 3639953}]
[{"Id": 3639953, "DatasetId": 2180053, "DatasourceVersionId": 3693638, "CreatorUserId": 8278966, "LicenseName": "Unknown", "CreationDate": "05/15/2022 12:47:05", "VersionNumber": 1.0, "Title": "UCI Bag Of Words", "Slug": "uci-bag-of-words", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 2180053, "CreatorUserId": 8278966, "OwnerUserId": 8278966.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3639953.0, "CurrentDatasourceVersionId": 3693638.0, "ForumId": 2206001, "Type": 2, "CreationDate": "05/15/2022 12:47:05", "LastActivityDate": "05/15/2022", "TotalViews": 587, "TotalDownloads": 68, "TotalVotes": 1, "TotalKernels": 15}]
[{"Id": 8278966, "UserName": "aslanovmustafa", "DisplayName": "Mustafa Aslanov", "RegisterDate": "09/04/2021", "PerformanceTier": 0}]
# 1. Bag of Words dataset # - a. Combine three of the datasets into a common corpus # import math import random import string import time from typing import List from collections import Counter from multiprocessing import Pool import numpy as np import pandas as pd from scipy import sparse from tqdm import tqdm import nltk from nltk.corpus import stopwords from nltk.tokenize import word_tokenize, sent_tokenize from nltk.lm import MLE from nltk.lm.preprocessing import padded_everygram_pipeline from nltk.tokenize.treebank import TreebankWordDetokenizer nltk.download("punkt") nltk.download("omw-1.4") nltk.download("stopwords") import regex as re from textblob import TextBlob from sklearn.feature_extraction.text import CountVectorizer def read_files( path: str, vocab_path: str, chunk_size: int, iterator: bool ) -> pd.DataFrame: dtype = {0: np.int32, 1: np.int32, 2: np.int16} header = None skiprows = 3 nrows = 1000000 tp = pd.read_csv( path, sep=" ", header=header, skiprows=skiprows, dtype=dtype, iterator=True, chunksize=chunk_size, nrows=nrows, ) df = pd.concat(tp, ignore_index=True) df.columns = ["document", "word_id", "count"] vocab = pd.read_csv(vocab_path, sep=" ", header=header) vocab["word_id"] = range(1, len(vocab) + 1) vocab.columns = ["word", "word_id"] df = pd.merge(df, vocab, on="word_id", how="inner")[["document", "word", "count"]] return df enron_df = read_files( "../input/uci-bag-of-words/docword.enron.txt", "../input/uci-bag-of-words/vocab.enron.txt", 1000, False, ) kos_df = read_files( "../input/uci-bag-of-words/docword.kos.txt", "../input/uci-bag-of-words/vocab.kos.txt", 1000, False, ) nips_df = read_files( "../input/uci-bag-of-words/docword.nips.txt", "../input/uci-bag-of-words/vocab.nips.txt", 1000, False, ) nytimes_df = read_files( "../input/uci-bag-of-words/docword.nytimes.txt", "../input/uci-bag-of-words/vocab.nytimes.txt", 1000, False, ) nytimes_df.dropna(inplace=True) enron_df.dropna(inplace=True) nips_df.dropna(inplace=True) kos_df.dropna(inplace=True) def get_random_sample(df: pd.DataFrame, sample_size: int) -> pd.DataFrame: max_document_id = df.document.max() random_docs = np.random.randint(1, max_document_id, sample_size) return df[df.document.isin(set(random_docs))] enron_small = get_random_sample(enron, 6000) nytimes_small = get_random_sample(nytimes, 6000) # Compute maximum document ID max_doc_id = max(enron_small["document"]) # Increment document IDs of KOS and NIPS datasets kos["document"] += max_doc_id max_doc_id = max(kos["document"]) nips["document"] += max_doc_id # Increment document IDs of NYTimes dataset nytimes_small["document"] += max(nips["document"]) # Combine all datasets corpus = pd.concat([enron_small, kos, nytimes_small, nips]) corpus # Calculate the number of unique words, documents, and total number of entries in the combined corpus. len(corpus.word.unique()), len(corpus["document"].unique()), len(corpus) # # **- b. Create a word-document matrix from this corpus** # pivot_table_params = { "index": "word", "columns": "document", "values": "count", "fill_value": 0, } td_matrix = pd.pivot_table(combined_corpus, **pivot_table_params) td_matrix.head() # creates a pivot table td_matrix from the combined_corpus DataFrame where the rows correspond to unique words, # the columns correspond to unique documents, and the values correspond to the count of each word in each document # # C. Using SKLearn, find the truncated singular value decomposition of this matrix,retaining the first 100 dimensions # * i. Are these dimensions interpretable? # * ii. 
What does dimension 1 represent # * iii. What do the top 10 dimensions represent? from sklearn.decomposition import TruncatedSVD # Create an instance of TruncatedSVD with specified parameters svd = TruncatedSVD(n_components=100, n_iter=8, random_state=33) # Fit the TruncatedSVD model on the term-document matrix svd.fit(td_matrix) # Transform the term-document matrix using the fitted TruncatedSVD model transformed_td = pd.DataFrame(svd.transform(td_matrix), index=td_matrix.index) print(svd.explained_variance_ratio_) print(svd.singular_values_) transformed_td # 1. the resulting dimensions is not interpretable since the original features are represented as linear combinations of the new dimensions. However, the first dimension is generally considered to be the most important, as it captures the most variance in the data. # 2. the first dimension represents the most important component, and its values for each word can be interpreted relative to the values of other words. For instance, if the value for word "aaa" is lower than that of "aaai," it could mean that "aaa" appeared less frequently in the most important documents. # 3. As mentioned earlier, these dimensions capture the most variance in the data and are likely to correspond to the most important features. # # D:Determine the average cosine similarity between documents within in each corpus.Next, determine the average cosine similarity between documents across corpora. from sklearn.metrics.pairwise import cosine_similarity matrices = { "enron_small": enron_small, "nips": nips, "kos": kos, "nytimes_small": nytimes_small, } similarities = {} for name, matrix in matrices.items(): matrix = matrix.pivot_table( index="word", columns="document", values="count", fill_value=0 ) similarities[name + "_similarity"] = cosine_similarity(matrix.T, matrix.T) for name, sim_matrix in similarities.items(): print(name + ":") print(sim_matrix) print("\n") avg_enron_similarity = np.array( ( similarities["enron_small_similarity"].sum(1) - np.diag(similarities["enron_small_similarity"]) ) / (similarities["enron_small_similarity"].shape[1] - 1) ).mean() avg_nips_similarity = np.array( (similarities["nips_similarity"].sum(1) - np.diag(similarities["nips_similarity"])) / (similarities["nips_similarity"].shape[1] - 1) ).mean() avg_kos_similarity = np.array( (similarities["kos_similarity"].sum(1) - np.diag(similarities["kos_similarity"])) / (similarities["kos_similarity"].shape[1] - 1) ).mean() avg_ny_similarity = np.array( ( similarities["nytimes_small_similarity"].sum(1) - np.diag(similarities["nytimes_small_similarity"]) ) / (similarities["nytimes_small_similarity"].shape[1] - 1) ).mean() print(f"Average similarity Enron corpus: {avg_enron_similarity}") print(f"Average similarity Nips corpus: {avg_nips_similarity}") print(f"Average similarity Kos corpus: {avg_kos_similarity}") print(f"Average similarity NYTimes corpus: {avg_ny_similarity}") from itertools import combinations for pair in combinations(similarities.items(), 2): similarity = cosine_similarity(pair[0][1][:100].T, pair[1][1][:100].T) print(f"Similarity between {pair[0][0]} and {pair[1][0]}: {similarity[0][0]}") # # e. Does LSA work well as a tool for clustering corpora? # We think so , since LSA is effective for clustering corpora, as it can produce topics and cluster data within the corpora, and also calculate document and corpus similarities that are meaningful in this context. # # f.Try to use PCA instead of LSA. What are your results? Are they replaceable? 
Can you # # achieve same results using PCA? Why? # from sklearn.decomposition import PCA # Enron corpus pca = PCA(n_components=100) enron_pca = pca.fit_transform(similarities["enron_small_similarity"].T) enron_centroids = pca.singular_values_ # NIPS corpus pca = PCA(n_components=100) nips_pca = pca.fit_transform(similarities["nips_similarity"].T) nips_centroids = pca.singular_values_ # KOS corpus pca = PCA(n_components=100) kos_pca = pca.fit_transform(similarities["kos_similarity"].T) kos_centroids = pca.singular_values_ # NYTimes corpus pca = PCA(n_components=100) nytimes_pca = pca.fit_transform(similarities["nytimes_small_similarity"].T) nytimes_centroids = pca.singular_values_ # Similarity calculations enron_similarity = cosine_similarity(enron_pca.T, enron_pca.T) nips_similarity = cosine_similarity(nips_pca.T, nips_pca.T) kos_similarity = cosine_similarity(kos_pca.T, kos_pca.T) nytimes_similarity = cosine_similarity(nytimes_pca.T, nytimes_pca.T) # Average similarity between documents in each corpus enron_avg_similarity = np.array( (enron_similarity.sum(1) - np.diag(enron_similarity)) / (enron_similarity.shape[1] - 1) ).mean() nips_avg_similarity = np.array( (nips_similarity.sum(1) - np.diag(nips_similarity)) / (nips_similarity.shape[1] - 1) ).mean() kos_avg_similarity = np.array( (kos_similarity.sum(1) - np.diag(kos_similarity)) / (kos_similarity.shape[1] - 1) ).mean() nytimes_avg_similarity = np.array( (nytimes_similarity.sum(1) - np.diag(nytimes_similarity)) / (nytimes_similarity.shape[1] - 1) ).mean() print(f"Average similarity between documents in Enron corpus: {enron_avg_similarity}") print(f"Average similarity between documents in Nips corpus: {nips_avg_similarity}") print(f"Average similarity between documents in KOS corpus: {kos_avg_similarity}") print( f"Average similarity between documents in NYTimes corpus: {nytimes_avg_similarity}" ) # Similarity between centroids enron_nips_similarity = cosine_similarity( enron_centroids.reshape(1, -1), nips_centroids.reshape(1, -1) ) enron_kos_similarity = cosine_similarity( enron_centroids.reshape(1, -1), kos_centroids.reshape(1, -1) ) enron_nytimes_similarity = cosine_similarity( enron_centroids.reshape(1, -1), nytimes_centroids.reshape(1, -1) ) nips_kos_similarity = cosine_similarity( nips_centroids.reshape(1, -1), kos_centroids.reshape(1, -1) ) nips_nytimes_similarity = cosine_similarity( nips_centroids.reshape(1, -1), nytimes_centroids.reshape(1, -1) ) kos_nytimes_similarity = cosine_similarity( kos_centroids.reshape(1, -1), nytimes_centroids.reshape(1, -1) ) print(f"Similarity between Enron and NIPS centroids: {enron_nips_similarity[0][0]}") print(f"Similarity between Enron and KOS centroids: {enron_kos_similarity[0][0]}") print( f"Similarity between Enron and NYTimes centroids: {enron_nytimes_similarity[0][0]}" ) print(f"Similarity between NIPS and KOS centroids: {nips_kos_similarity[0][0]}") print(f"Similarity between NIPS and NYTimes centroids: {nips_nytimes_similarity[0][0]}")
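The PCA-versus-LSA question above comes down to one concrete difference: sklearn's TruncatedSVD factorises the raw (possibly sparse) count matrix directly, while PCA subtracts the column means first, so the two do not produce interchangeable components on a term-document matrix. The following is an illustrative sketch with made-up toy counts, added here for clarity and not part of the original notebook.
# Illustrative sketch (not from the original notebook): contrast LSA (TruncatedSVD)
# with PCA on a small random count matrix standing in for the term-document matrix.
import numpy as np
from sklearn.decomposition import PCA, TruncatedSVD

rng = np.random.default_rng(33)
X = rng.integers(0, 5, size=(200, 30)).astype(float)  # toy word-by-document counts

lsa = TruncatedSVD(n_components=5, random_state=33).fit(X)  # no centring of columns
pca = PCA(n_components=5, random_state=33).fit(X)           # centres columns first

# The leading directions differ because PCA removes the mean "background" frequency
# before decomposing, which is why swapping PCA in for LSA changes the results.
print("LSA explained variance ratio:", lsa.explained_variance_ratio_.round(3))
print("PCA explained variance ratio:", pca.explained_variance_ratio_.round(3))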
false
0
3,284
0
3,311
3,284
129526612
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import pandas as pd import plotly.graph_objects as go import plotly.express as px import seaborn as sns df = pd.read_csv( "/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/defog/4c3aa8ea6e.csv" ) df df["Valid"].value_counts(), df["Task"].value_counts() # Valid There were cases during the video annotation that were hard for the annotator to decide if there was an Akinetic # (i.e., essentially no movement) FoG or the subject stopped voluntarily. Only event annotations where the series is marked true should be considered as unambiguous. # Task Series were only annotated where this value is true. Portions marked false should be considered unannotated. valid_df = df[df["Valid"] == True] valid_df valid_df.reset_index(inplace=True, drop=True) valid_df print(valid_df["Valid"].value_counts(), valid_df["Task"].value_counts()) print( valid_df["StartHesitation"].value_counts(), valid_df["Turn"].value_counts(), valid_df["Walking"].value_counts(), ) fig = go.Figure() fig.add_trace(go.Scatter(x=valid_df.index, y=valid_df.AccV, name="AccV")) fig.add_trace(go.Scatter(x=valid_df.index, y=valid_df.AccML, name="AccML")) fig.add_trace(go.Scatter(x=valid_df.index, y=valid_df.AccAP, name="AccAP")) fig.add_trace( go.Scatter( x=valid_df.index, y=valid_df.StartHesitation.replace({0: -2, 1: 2}), name="StartHesitation", mode="lines", ) ) fig.add_trace( go.Scatter( x=valid_df.index, y=valid_df.Turn.replace({0: -2, 1: 2}), name="Turn", mode="lines", ) ) fig.add_trace( go.Scatter( x=valid_df.index.to_list(), y=valid_df.Walking.replace({0: -2, 1: 2}), mode="lines", name="Walking", ) ) fig.update_layout( title="Time series of accelerometer in defog dataset( after removal of Valid = False and Task = False)", xaxis_title="time index", yaxis_title="Amplitude", ) # Display the Plotly figure fig.show() # chweck if more than one indicator can be 1 at a time valid_df["sum"] = valid_df["StartHesitation"] + valid_df["Turn"] + valid_df["Walking"] valid_df["sum"].value_counts() sns.pairplot( data=valid_df[["AccV", "AccML", "AccAP", "StartHesitation"]], hue="StartHesitation" ) sns.pairplot(data=valid_df[["AccV", "AccML", "AccAP", "Turn"]], hue="Turn") sns.pairplot(data=valid_df[["AccV", "AccML", "AccAP", "Walking"]], hue="Walking") import plotly.express as px import pandas as pd df_melt = pd.melt( valid_df, id_vars=["Walking"], value_vars=["AccV", "AccML", "AccAP"], var_name="Value", value_name="Measurement", ) fig = px.box(df_melt, x="Walking", y="Measurement", color="Value") fig.update_layout( title="Walking state (Valid) box plot for all threes sensor", xaxis_title="Walking state", yaxis_title="Amplitude", ) fig.show() import plotly.express as px import pandas as pd df_melt = pd.melt( valid_df, id_vars=["Turn"], value_vars=["AccV", "AccML", "AccAP"], var_name="Value", value_name="Measurement", ) fig = px.box(df_melt, 
x="Turn", y="Measurement", color="Value") fig.update_layout( title="Turn state (Valid) box plot for all threes sensor", xaxis_title="Turn state", yaxis_title="Amplitude", ) fig.show() import plotly.express as px import pandas as pd df_melt = pd.melt( valid_df, id_vars=["StartHesitation"], value_vars=["AccV", "AccML", "AccAP"], var_name="Value", value_name="Measurement", ) fig = px.box(df_melt, x="StartHesitation", y="Measurement", color="Value") fig.update_layout( title="StartHesitation state (Valid) box plot for all threes sensor", xaxis_title="StartHesitation state", yaxis_title="Amplitude", ) fig.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/526/129526612.ipynb
null
null
[{"Id": 129526612, "ScriptId": 38514333, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4875831, "CreationDate": "05/14/2023 14:42:20", "VersionNumber": 1.0, "Title": "Basic EDA", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 132.0, "LinesInsertedFromPrevious": 132.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import pandas as pd import plotly.graph_objects as go import plotly.express as px import seaborn as sns df = pd.read_csv( "/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/defog/4c3aa8ea6e.csv" ) df df["Valid"].value_counts(), df["Task"].value_counts() # Valid There were cases during the video annotation that were hard for the annotator to decide if there was an Akinetic # (i.e., essentially no movement) FoG or the subject stopped voluntarily. Only event annotations where the series is marked true should be considered as unambiguous. # Task Series were only annotated where this value is true. Portions marked false should be considered unannotated. valid_df = df[df["Valid"] == True] valid_df valid_df.reset_index(inplace=True, drop=True) valid_df print(valid_df["Valid"].value_counts(), valid_df["Task"].value_counts()) print( valid_df["StartHesitation"].value_counts(), valid_df["Turn"].value_counts(), valid_df["Walking"].value_counts(), ) fig = go.Figure() fig.add_trace(go.Scatter(x=valid_df.index, y=valid_df.AccV, name="AccV")) fig.add_trace(go.Scatter(x=valid_df.index, y=valid_df.AccML, name="AccML")) fig.add_trace(go.Scatter(x=valid_df.index, y=valid_df.AccAP, name="AccAP")) fig.add_trace( go.Scatter( x=valid_df.index, y=valid_df.StartHesitation.replace({0: -2, 1: 2}), name="StartHesitation", mode="lines", ) ) fig.add_trace( go.Scatter( x=valid_df.index, y=valid_df.Turn.replace({0: -2, 1: 2}), name="Turn", mode="lines", ) ) fig.add_trace( go.Scatter( x=valid_df.index.to_list(), y=valid_df.Walking.replace({0: -2, 1: 2}), mode="lines", name="Walking", ) ) fig.update_layout( title="Time series of accelerometer in defog dataset( after removal of Valid = False and Task = False)", xaxis_title="time index", yaxis_title="Amplitude", ) # Display the Plotly figure fig.show() # chweck if more than one indicator can be 1 at a time valid_df["sum"] = valid_df["StartHesitation"] + valid_df["Turn"] + valid_df["Walking"] valid_df["sum"].value_counts() sns.pairplot( data=valid_df[["AccV", "AccML", "AccAP", "StartHesitation"]], hue="StartHesitation" ) sns.pairplot(data=valid_df[["AccV", "AccML", "AccAP", "Turn"]], hue="Turn") sns.pairplot(data=valid_df[["AccV", "AccML", "AccAP", "Walking"]], hue="Walking") import plotly.express as px import pandas as pd df_melt = pd.melt( valid_df, id_vars=["Walking"], value_vars=["AccV", "AccML", "AccAP"], var_name="Value", value_name="Measurement", ) fig = px.box(df_melt, x="Walking", y="Measurement", color="Value") fig.update_layout( title="Walking state (Valid) box plot for all threes sensor", xaxis_title="Walking state", yaxis_title="Amplitude", ) fig.show() import plotly.express as px import pandas as pd df_melt = pd.melt( valid_df, id_vars=["Turn"], value_vars=["AccV", "AccML", "AccAP"], var_name="Value", value_name="Measurement", ) fig = px.box(df_melt, 
x="Turn", y="Measurement", color="Value") fig.update_layout( title="Turn state (Valid) box plot for all threes sensor", xaxis_title="Turn state", yaxis_title="Amplitude", ) fig.show() import plotly.express as px import pandas as pd df_melt = pd.melt( valid_df, id_vars=["StartHesitation"], value_vars=["AccV", "AccML", "AccAP"], var_name="Value", value_name="Measurement", ) fig = px.box(df_melt, x="StartHesitation", y="Measurement", color="Value") fig.update_layout( title="StartHesitation state (Valid) box plot for all threes sensor", xaxis_title="StartHesitation state", yaxis_title="Amplitude", ) fig.show()
false
0
1,435
1
1,435
1,435
129526437
<jupyter_start><jupyter_text>Satellite Image Classification ## Context Satellite image Classification Dataset-RSI-CB256 , This dataset has 4 different classes mixed from Sensors and google map snapshot ## Content The past years have witnessed great progress on remote sensing (RS) image interpretation and its wide applications. With RS images becoming more accessible than ever before, there is an increasing demand for the automatic interpretation of these images. In this context, the benchmark datasets serve as essential prerequisites for developing and testing intelligent interpretation algorithms. After reviewing existing benchmark datasets in the research community of RS image interpretation, this article discusses the problem of how to efficiently prepare a suitable benchmark dataset for RS image interpretation. Specifically, we first analyze the current challenges of developing intelligent algorithms for RS image interpretation with bibliometric investigations. We then present the general guidance on creating benchmark datasets in efficient manners. Following the presented guidance, we also provide an example on building RS image dataset, i.e., Million-AID, a new large-scale benchmark dataset containing a million instances for RS image scene classification. Several challenges and perspectives in RS image annotation are finally discussed to facilitate the research in benchmark dataset construction. We do hope this paper will provide the RS community an overall perspective on constructing large-scale and practical image datasets for further research, especially data-driven ones. Kaggle dataset identifier: satellite-image-classification <jupyter_script>import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from tensorflow.keras import losses, optimizers, models, optimizers from tensorflow.keras.models import Model, Sequential from tensorflow.keras.layers import ( Input, Dense, Conv2D, BatchNormalization, Dropout, MaxPooling2D, GlobalAveragePooling2D, Flatten, ) from tensorflow.keras.utils import image_dataset_from_directory batch_size = 32 image_size = [100, 100] epochs = 50 train_ds = image_dataset_from_directory( "/kaggle/input/satellite-image-classification/data", image_size=image_size, batch_size=batch_size, shuffle=True, ) class_names = train_ds.class_names for img, lab in train_ds.take(1): plt.figure(figsize=(10, 10)) for i in range(9): plt.subplot(3, 3, i + 1) plt.imshow(img[i].numpy().astype("uint8")) plt.title(class_names[lab[i]]) plt.axis("off") train_ds = train_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE) K = len(class_names) inputs = Input(shape=image_size + [3]) x = Conv2D(16, 3, padding="same", activation="relu")(inputs) x = BatchNormalization()(x) x = MaxPooling2D()(x) x = Conv2D(32, 3, padding="same", activation="relu")(x) x = BatchNormalization()(x) x = MaxPooling2D()(x) x = Conv2D(64, 3, padding="same", activation="relu")(x) x = BatchNormalization()(x) x = MaxPooling2D()(x) x = Flatten()(x) x = Dense(128, activation="relu")(x) x = Dropout(0.2)(x) outputs = Dense(K, activation="softmax")(x) model = Model(inputs, outputs) model.summary() model.compile( loss=losses.SparseCategoricalCrossentropy(), optimizer=optimizers.Adam(learning_rate=0.001), metrics="accuracy", ) r = model.fit(train_ds, epochs=10) plt.figure(figsize=(10, 5)) plt.subplot(1, 2, 1) plt.plot(r.history["loss"], marker="s") plt.legend(["train"]) plt.subplot(1, 2, 2) plt.plot(r.history["accuracy"], marker="o", color="orange") plt.legend(["train"]) plt.show()
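The model above reports only training accuracy, since no hold-out set is created. A natural extension is to let the same Keras loader carve out a validation subset; the snippet below is a hedged sketch of that change rather than part of the original notebook, and the 20% split, seed value, and reuse of the dataset path are assumptions.
# Hedged sketch: create train/validation splits with the same Keras utility used above.
from tensorflow.keras.utils import image_dataset_from_directory

data_dir = "/kaggle/input/satellite-image-classification/data"  # path from the notebook
train_ds = image_dataset_from_directory(
    data_dir, validation_split=0.2, subset="training", seed=42,
    image_size=(100, 100), batch_size=32, shuffle=True,
)
val_ds = image_dataset_from_directory(
    data_dir, validation_split=0.2, subset="validation", seed=42,
    image_size=(100, 100), batch_size=32, shuffle=True,
)
# r = model.fit(train_ds, validation_data=val_ds, epochs=10)  # then compare validation accuracy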
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/526/129526437.ipynb
satellite-image-classification
mahmoudreda55
[{"Id": 129526437, "ScriptId": 38314695, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14880201, "CreationDate": "05/14/2023 14:40:45", "VersionNumber": 1.0, "Title": "satelite_image_classification", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 70.0, "LinesInsertedFromPrevious": 70.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185673588, "KernelVersionId": 129526437, "SourceDatasetVersionId": 2546969}]
[{"Id": 2546969, "DatasetId": 1544742, "DatasourceVersionId": 2589964, "CreatorUserId": 4268983, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "08/21/2021 18:13:09", "VersionNumber": 1.0, "Title": "Satellite Image Classification", "Slug": "satellite-image-classification", "Subtitle": "Satellite Remote Sensing Image -RSI-CB256", "Description": "## Context\nSatellite image Classification Dataset-RSI-CB256 , This dataset has 4 different classes mixed from Sensors and google map snapshot\n\n## Content\nThe past years have witnessed great progress on remote sensing (RS) image interpretation and its wide applications. With RS images becoming more accessible than ever before, there is an increasing demand for the automatic interpretation of these images. In this context, the benchmark datasets serve as essential prerequisites for developing and testing intelligent interpretation algorithms. After reviewing existing benchmark datasets in the research community of RS image interpretation, this article discusses the problem of how to efficiently prepare a suitable benchmark dataset for RS image interpretation. Specifically, we first analyze the current challenges of developing intelligent algorithms for RS image interpretation with bibliometric investigations. We then present the general guidance on creating benchmark datasets in efficient manners. Following the presented guidance, we also provide an example on building RS image dataset, i.e., Million-AID, a new large-scale benchmark dataset containing a million instances for RS image scene classification. Several challenges and perspectives in RS image annotation are finally discussed to facilitate the research in benchmark dataset construction. We do hope this paper will provide the RS community an overall perspective on constructing large-scale and practical image datasets for further research, especially data-driven ones.\n\n\n### Acknowledgements\n\nAnnotated Datasets for RS Image Interpretation\nThe interpretation of RS images has been playing an increasingly important role in a large diversity of applications, and thus, has attracted remarkable research attentions. Consequently, various datasets have been built to advance the development of interpretation algorithms for RS images. Covering literature published over the past decade, we perform a systematic review of the existing RS image datasets concerning the current mainstream of RS image interpretation tasks, including scene classification, object detection, semantic segmentation and change detection.\n\n\n### Inspiration\n\nArtificial Intelligence, Computer Vision, Image Processing, Deep Learning, Satellite Image, Remote Sensing", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1544742, "CreatorUserId": 4268983, "OwnerUserId": 4268983.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2546969.0, "CurrentDatasourceVersionId": 2589964.0, "ForumId": 1564636, "Type": 2, "CreationDate": "08/21/2021 18:13:09", "LastActivityDate": "08/21/2021", "TotalViews": 71066, "TotalDownloads": 6814, "TotalVotes": 89, "TotalKernels": 83}]
[{"Id": 4268983, "UserName": "mahmoudreda55", "DisplayName": "Mahmoud Reda", "RegisterDate": "12/31/2019", "PerformanceTier": 2}]
import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from tensorflow.keras import losses, optimizers, models, optimizers from tensorflow.keras.models import Model, Sequential from tensorflow.keras.layers import ( Input, Dense, Conv2D, BatchNormalization, Dropout, MaxPooling2D, GlobalAveragePooling2D, Flatten, ) from tensorflow.keras.utils import image_dataset_from_directory batch_size = 32 image_size = [100, 100] epochs = 50 train_ds = image_dataset_from_directory( "/kaggle/input/satellite-image-classification/data", image_size=image_size, batch_size=batch_size, shuffle=True, ) class_names = train_ds.class_names for img, lab in train_ds.take(1): plt.figure(figsize=(10, 10)) for i in range(9): plt.subplot(3, 3, i + 1) plt.imshow(img[i].numpy().astype("uint8")) plt.title(class_names[lab[i]]) plt.axis("off") train_ds = train_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE) K = len(class_names) inputs = Input(shape=image_size + [3]) x = Conv2D(16, 3, padding="same", activation="relu")(inputs) x = BatchNormalization()(x) x = MaxPooling2D()(x) x = Conv2D(32, 3, padding="same", activation="relu")(x) x = BatchNormalization()(x) x = MaxPooling2D()(x) x = Conv2D(64, 3, padding="same", activation="relu")(x) x = BatchNormalization()(x) x = MaxPooling2D()(x) x = Flatten()(x) x = Dense(128, activation="relu")(x) x = Dropout(0.2)(x) outputs = Dense(K, activation="softmax")(x) model = Model(inputs, outputs) model.summary() model.compile( loss=losses.SparseCategoricalCrossentropy(), optimizer=optimizers.Adam(learning_rate=0.001), metrics="accuracy", ) r = model.fit(train_ds, epochs=10) plt.figure(figsize=(10, 5)) plt.subplot(1, 2, 1) plt.plot(r.history["loss"], marker="s") plt.legend(["train"]) plt.subplot(1, 2, 2) plt.plot(r.history["accuracy"], marker="o", color="orange") plt.legend(["train"]) plt.show()
false
0
670
0
973
670
129526468
<jupyter_start><jupyter_text>Suicide Attempts in Shandong, China ``` Data on serious suicide attempts in Shandong, China A data frame with 2571 observations on the following 11 variables. ``` | Column | Description | | --- | --- | | Person_ID | ID number of victims | | Hospitalised | Hospitalized? (no or yes) | | Died | Died? (no or yes) | | Urban | Urban area? (no, unknown, or yes) | | Year | Year (2009, 2010, or 2011) | | Month | Month (1=Jan through 12=December) | | Sex | Sex (female or male) | | Age | Age (years) | | Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) | | Occupation | One of ten occupation categories | | method | One of nine possible methods | ### Details Data from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China. ## Source Sun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) "Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study," BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762 Kaggle dataset identifier: suicide-attempts-in-shandong-china <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set_style("whitegrid") from matplotlib import rcParams df = pd.read_csv("/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv") df.head() # **Exploring the dataset** df.shape # Work in the columns. df.columns df.columns = df.columns.str.lower() df.rename(columns={"Unnamed: 0": "unnamed"}).head(2) # Dropping the columns that won't be useful. df.drop(["unnamed: 0", "person_id"], axis=1, inplace=True) df.head(1) # Exploring and understanding data. df.info() # Each data type is ok, no correction needed. df.isnull().sum() # There is no null values (Too good to be true). df.duplicated().value_counts() df.drop_duplicates(inplace=True) df.duplicated().value_counts() # The database has no duplicate or missing values. # **Understanding data** # Starting with qualitative data. df.hospitalised.value_counts().reset_index() df.hospitalised.value_counts(normalize=True).reset_index().round(2) # Sixty percent of the attempt of suicide were hospitalized. fig, ax = plt.subplots(figsize=(4, 4)) ax = sns.countplot(data=df, x=df["hospitalised"]) ax.set_title("Suicide Attempts Hospitalized", size=14) ax.set_ylabel("Count", size=15) ax.set_xlabel("People hospitalized", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=12) plt.show() df.died.value_counts().reset_index().round(3) df.died.value_counts(normalize=True).reset_index().round(3) # Almost half of people who attempt to commit suicide died. 
fig, ax = plt.subplots(figsize=(4, 4)) ax = sns.countplot(data=df, x=df["died"]) ax.set_title("Suicide Attempts Died", size=16) ax.set_ylabel("Count", size=15) ax.set_xlabel("Died", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=12) plt.show() # How many people were hospitalized and died?. hospitalized_died = df[(df["hospitalised"] == "yes") & (df["died"] == "yes")] hospitalized_died.value_counts().sum() hos_and_died = (238 * 100) / 2571 print(hos_and_died) # 9 % of the people who attempted to suicide were hospitalized and died. df.urban.value_counts().reset_index() df.urban.value_counts(normalize=True).reset_index().round(2) # It's interesting that the majority of the cases are no urban. fig, ax = plt.subplots(figsize=(4, 4)) sns.countplot(data=df, x=df["urban"]) ax.set_title("Suicide Attempts Urban Areas", size=16) ax.set_ylabel("Count", size=15) ax.set_xlabel("Urban", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=12) plt.show() df.sex.value_counts().reset_index() df.sex.value_counts(normalize=True).reset_index().round(2) # According to data there is no difference between sex at the time to attempt to commit suicide (At least in China) fig, ax = plt.subplots(figsize=(4, 4)) sns.countplot(data=df, x="sex") ax.set_title("Suicide Attempts by Sex", size=16) ax.set_ylabel("Count", size=15) ax.set_xlabel("Sex", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=12) plt.show() df.education.value_counts().reset_index() df.education.value_counts(normalize=True).reset_index().round(2) fig, ax = plt.subplots(figsize=(6, 6)) sns.countplot(data=df, x="education") ax.set_title("Suicide Attempts by education", size=14) ax.set_ylabel("Count", size=15) ax.set_xlabel("Education", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=12) plt.show() df.occupation.value_counts().reset_index() df.occupation.value_counts(normalize=True).reset_index().round(3) # This is a surprising founding, there is a lot of farmers that attempts to suicide! fig, ax = plt.subplots(figsize=(8, 4)) sns.countplot(data=df, x="occupation") ax.set_title("Suicide by occupation", size=18) ax.set_ylabel("Count", size=15) ax.set_xlabel("Occupation", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=10, rotation=30) plt.show() df.method.value_counts().reset_index() df.method.value_counts(normalize=True).reset_index().round(3) fig, ax = plt.subplots(figsize=(8, 4)) sns.countplot(data=df, x="method") ax.set_title("Suicide by method", size=14) ax.set_ylabel("Count", size=15) ax.set_xlabel("method", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=10, rotation=30) plt.show() # Interesting foundings, most of the people who try to commit suicide are farmer and try to do it with pesticide. # Is that a coincidence?, no, it is not, pesticides are the first thing that a farmer has to try to kill him self. # Analizing numerical data: df.head(2) p = sns.displot(data=df, x="age", bins=18, kde=True).set( title="Attemp to suicide in function of the Age" ) plt.gcf().set_size_inches(6, 4) plt.show() # It looks like the most frequent age to attempt to commit suicide is between 40 and 60 (Life is hard guys) p = sns.displot(data=df, x="month", bins=12, kde=True) plt.gcf().set_size_inches(6, 4) plt.show()
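The closing observation that farmers and pesticides dominate can be made explicit with a cross-tabulation of occupation against method. The snippet below is an illustrative addition rather than part of the original analysis; it assumes the cleaned df built above (lower-cased columns, duplicates dropped), and the exact spelling of the pesticide category is a guess, hence the guard.
# Illustrative sketch: row-normalised occupation-by-method table (assumes the cleaned df from above).
import pandas as pd

method_by_occupation = pd.crosstab(df["occupation"], df["method"], normalize="index").round(2)
print(method_by_occupation)
# If a pesticide column exists under this spelling, rank occupations by reliance on it.
if "Pesticide" in method_by_occupation.columns:
    print(method_by_occupation["Pesticide"].sort_values(ascending=False))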
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/526/129526468.ipynb
suicide-attempts-in-shandong-china
utkarshx27
[{"Id": 129526468, "ScriptId": 38509059, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8193952, "CreationDate": "05/14/2023 14:41:11", "VersionNumber": 1.0, "Title": "Suicide Attempts in Shandong, China", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 215.0, "LinesInsertedFromPrevious": 215.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185673672, "KernelVersionId": 129526468, "SourceDatasetVersionId": 5617993}]
[{"Id": 5617993, "DatasetId": 3230370, "DatasourceVersionId": 5693173, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/06/2023 11:54:22", "VersionNumber": 1.0, "Title": "Suicide Attempts in Shandong, China", "Slug": "suicide-attempts-in-shandong-china", "Subtitle": "Serious Suicide Attempts in Shandong, China: Three-Year Study", "Description": "```\nData on serious suicide attempts in Shandong, China\nA data frame with 2571 observations on the following 11 variables.\n```\n\n| Column | Description |\n| --- | --- |\n| Person_ID | ID number of victims |\n| Hospitalised | Hospitalized? (no or yes) |\n| Died | Died? (no or yes) |\n| Urban | Urban area? (no, unknown, or yes) |\n| Year | Year (2009, 2010, or 2011) |\n| Month | Month (1=Jan through 12=December) |\n| Sex | Sex (female or male) |\n| Age | Age (years) |\n| Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) |\n| Occupation | One of ten occupation categories |\n| method | One of nine possible methods |\n\n### Details \nData from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China.\n\n## Source\nSun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) \"Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study,\" BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3230370, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5617993.0, "CurrentDatasourceVersionId": 5693173.0, "ForumId": 3295509, "Type": 2, "CreationDate": "05/06/2023 11:54:22", "LastActivityDate": "05/06/2023", "TotalViews": 8885, "TotalDownloads": 1402, "TotalVotes": 42, "TotalKernels": 12}]
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set_style("whitegrid") from matplotlib import rcParams df = pd.read_csv("/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv") df.head() # **Exploring the dataset** df.shape # Work in the columns. df.columns df.columns = df.columns.str.lower() df.rename(columns={"Unnamed: 0": "unnamed"}).head(2) # Dropping the columns that won't be useful. df.drop(["unnamed: 0", "person_id"], axis=1, inplace=True) df.head(1) # Exploring and understanding data. df.info() # Each data type is ok, no correction needed. df.isnull().sum() # There is no null values (Too good to be true). df.duplicated().value_counts() df.drop_duplicates(inplace=True) df.duplicated().value_counts() # The database has no duplicate or missing values. # **Understanding data** # Starting with qualitative data. df.hospitalised.value_counts().reset_index() df.hospitalised.value_counts(normalize=True).reset_index().round(2) # Sixty percent of the attempt of suicide were hospitalized. fig, ax = plt.subplots(figsize=(4, 4)) ax = sns.countplot(data=df, x=df["hospitalised"]) ax.set_title("Suicide Attempts Hospitalized", size=14) ax.set_ylabel("Count", size=15) ax.set_xlabel("People hospitalized", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=12) plt.show() df.died.value_counts().reset_index().round(3) df.died.value_counts(normalize=True).reset_index().round(3) # Almost half of people who attempt to commit suicide died. fig, ax = plt.subplots(figsize=(4, 4)) ax = sns.countplot(data=df, x=df["died"]) ax.set_title("Suicide Attempts Died", size=16) ax.set_ylabel("Count", size=15) ax.set_xlabel("Died", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=12) plt.show() # How many people were hospitalized and died?. hospitalized_died = df[(df["hospitalised"] == "yes") & (df["died"] == "yes")] hospitalized_died.value_counts().sum() hos_and_died = (238 * 100) / 2571 print(hos_and_died) # 9 % of the people who attempted to suicide were hospitalized and died. df.urban.value_counts().reset_index() df.urban.value_counts(normalize=True).reset_index().round(2) # It's interesting that the majority of the cases are no urban. 
fig, ax = plt.subplots(figsize=(4, 4)) sns.countplot(data=df, x=df["urban"]) ax.set_title("Suicide Attempts Urban Areas", size=16) ax.set_ylabel("Count", size=15) ax.set_xlabel("Urban", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=12) plt.show() df.sex.value_counts().reset_index() df.sex.value_counts(normalize=True).reset_index().round(2) # According to data there is no difference between sex at the time to attempt to commit suicide (At least in China) fig, ax = plt.subplots(figsize=(4, 4)) sns.countplot(data=df, x="sex") ax.set_title("Suicide Attempts by Sex", size=16) ax.set_ylabel("Count", size=15) ax.set_xlabel("Sex", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=12) plt.show() df.education.value_counts().reset_index() df.education.value_counts(normalize=True).reset_index().round(2) fig, ax = plt.subplots(figsize=(6, 6)) sns.countplot(data=df, x="education") ax.set_title("Suicide Attempts by education", size=14) ax.set_ylabel("Count", size=15) ax.set_xlabel("Education", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=12) plt.show() df.occupation.value_counts().reset_index() df.occupation.value_counts(normalize=True).reset_index().round(3) # This is a surprising founding, there is a lot of farmers that attempts to suicide! fig, ax = plt.subplots(figsize=(8, 4)) sns.countplot(data=df, x="occupation") ax.set_title("Suicide by occupation", size=18) ax.set_ylabel("Count", size=15) ax.set_xlabel("Occupation", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=10, rotation=30) plt.show() df.method.value_counts().reset_index() df.method.value_counts(normalize=True).reset_index().round(3) fig, ax = plt.subplots(figsize=(8, 4)) sns.countplot(data=df, x="method") ax.set_title("Suicide by method", size=14) ax.set_ylabel("Count", size=15) ax.set_xlabel("method", size=15) ax.set_xticklabels(ax.get_xticklabels(), size=10, rotation=30) plt.show() # Interesting foundings, most of the people who try to commit suicide are farmer and try to do it with pesticide. # Is that a coincidence?, no, it is not, pesticides are the first thing that a farmer has to try to kill him self. # Analizing numerical data: df.head(2) p = sns.displot(data=df, x="age", bins=18, kde=True).set( title="Attemp to suicide in function of the Age" ) plt.gcf().set_size_inches(6, 4) plt.show() # It looks like the most frequent age to attempt to commit suicide is between 40 and 60 (Life is hard guys) p = sns.displot(data=df, x="month", bins=12, kde=True) plt.gcf().set_size_inches(6, 4) plt.show()
false
1
1,932
0
2,345
1,932
129526286
<jupyter_start><jupyter_text>T5_embeds Kaggle dataset identifier: t5embeds <jupyter_script># numpy for numerical computing import numpy as np # pandas for DataFrames import pandas as pd # plotly for visualization of the reduced embeddings import plotly.express as px # to calculate the PCAs from sklearn.decomposition import PCA # SVD for dimensionality reduction from sklearn.decomposition import TruncatedSVD # for K means clustering from sklearn.cluster import KMeans # path to the t5 embeddings train_embeddings_path = "/kaggle/input/t5embeds/train_embeds.npy" test_embeddings_path = "/kaggle/input/t5embeds/test_embeds.npy" train_ids_path = "/kaggle/input/t5embeds/train_ids.npy" test_ids_path = "/kaggle/input/t5embeds/test_ids.npy" # read the ids and embeddings into numpy arrays train_ids = np.load(train_ids_path) test_ids = np.load(test_ids_path) train_embeddings = np.load(train_embeddings_path) test_embeddings = np.load(test_embeddings_path) train_ids.shape, test_ids.shape, train_embeddings.shape, test_embeddings.shape # we want to reduce the embeddings to 2 dimensions for visualization pca = PCA(n_components=2) train_embeddings_reduced = pca.fit_transform(train_embeddings) test_embeddings_reduced = pca.transform(test_embeddings) # shape train_embeddings_reduced.shape, test_embeddings_reduced.shape # visualize the reduced embeddings # label train and test to distinguish them train_labels = np.repeat("train", train_embeddings_reduced.shape[0]) test_labels = np.repeat("test", test_embeddings_reduced.shape[0]) # combine the embeddings and labels embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_labels, test_labels]) # create a DataFrame df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels # plot fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.2, ) fig.show() # we have the GO annotations for the training set # read the annotations into a DataFrame train_annotations = pd.read_csv( "/kaggle/input/cafa-5-protein-function-prediction/Train/train_terms.tsv", sep="\t" ) # accumalate for each EntryID the GO terms in a list train_annotations = ( train_annotations.groupby("EntryID")["term"].apply(list).reset_index() ) train_annotations.head() # count the number of GO terms for each EntryID train_annotations["num_terms"] = train_annotations["term"].apply(lambda x: len(x)) train_annotations.head() # make a dictonary using EntryID and num_terms train_annotations_dict = dict( zip(train_annotations["EntryID"], train_annotations["num_terms"]) ) # map the dictonary to the train_ids and use 0 as default value for train -1 for test # this will be used to color the plot train_labels = np.array([train_annotations_dict.get(x, 0) for x in train_ids]) test_labels = np.repeat(-1, test_ids.shape[0]) # combine the embeddings and labels embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_labels, test_labels]) # create a DataFrame df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels # plot, use a rainbow color scheme fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.2, color_continuous_scale="rainbow", ) fig.show() # determine the most frequent GO terms # flatten the list of lists terms = [item for sublist in train_annotations["term"].values for item in sublist] # count the number of occurences for each GO term term_counts = 
pd.Series(terms).value_counts() # make a boolean column for the annotations and see if the term is in the top 1000 train_annotations["top_1000"] = train_annotations["term"].apply( lambda x: any([term in x for term in term_counts.index[:1000]]) ) train_annotations.head() # use the top 1000 terms to color the plot # make a dictonary using EntryID and top_1000 train_annotations_dict = dict( zip(train_annotations["EntryID"], train_annotations["top_1000"]) ) # map the dictonary, use -1 as default value train_labels = np.array([train_annotations_dict.get(x, -1) for x in train_ids]) test_labels = np.repeat(-1, test_ids.shape[0]) # combine the embeddings and labels embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_labels, test_labels]) # create a DataFrame df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels # plot, use different color for each label fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.1, color_discrete_sequence=px.colors.qualitative.Pastel, ) fig.show() # use SVD to reduce the embeddings to 2 dimensions svd = TruncatedSVD(n_components=2) train_embeddings_reduced = svd.fit_transform(train_embeddings) test_embeddings_reduced = svd.transform(test_embeddings) # shape train_embeddings_reduced.shape, test_embeddings_reduced.shape # visualize the reduced embeddings, labelling which is in train and which in test as above train_labels = np.repeat("train", train_embeddings_reduced.shape[0]) test_labels = np.repeat("test", test_embeddings_reduced.shape[0]) embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_labels, test_labels]) df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.2, ) fig.show() # reduce to 2 dimensions with PCA, cluster the reduced embeddings with k-means and color the plot with clusters as labels pca = PCA(n_components=2) train_embeddings_reduced = pca.fit_transform(train_embeddings) test_embeddings_reduced = pca.transform(test_embeddings) # cluster the reduced embeddings with k-means kmeans = KMeans(n_init=10, n_clusters=10, random_state=42) train_clusters = kmeans.fit_predict(train_embeddings_reduced) test_clusters = kmeans.predict(test_embeddings_reduced) # combine the embeddings and labels embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_clusters, test_clusters]) # create a DataFrame df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels # plot fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.2, ) fig.show() # count how many entries of train and test are in each cluster train_clusters_counts = pd.Series(train_clusters).value_counts() test_clusters_counts = pd.Series(test_clusters).value_counts() # visualize the distribution of train and test entries in the clusters seperately # make a DataFrame df = pd.DataFrame({"train": train_clusters_counts, "test": test_clusters_counts}) # plot fig = px.bar( df, x=df.index, y=["train", "test"], title="Distribution of Train and Test Entries in the Clusters", barmode="group", ) fig.show() # it seems that the clusters are not very useful for seperating train and test after reduction by PCA # same as above but with SVD svd = TruncatedSVD(n_components=2) train_embeddings_reduced 
= svd.fit_transform(train_embeddings) test_embeddings_reduced = svd.transform(test_embeddings) # cluster the reduced embeddings with k-means kmeans = KMeans(n_init=10, n_clusters=10, random_state=42) train_clusters = kmeans.fit_predict(train_embeddings_reduced) test_clusters = kmeans.predict(test_embeddings_reduced) # combine the embeddings and labels embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_clusters, test_clusters]) # create a DataFrame df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels # plot fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.2, ) fig.show() # count how many entries of train and test are in each cluster train_clusters_counts = pd.Series(train_clusters).value_counts() test_clusters_counts = pd.Series(test_clusters).value_counts() # visualize the distribution of train and test entries in the clusters seperately # make a DataFrame df = pd.DataFrame({"train": train_clusters_counts, "test": test_clusters_counts}) # plot fig = px.bar( df, x=df.index, y=["train", "test"], title="Distribution of Train and Test Entries in the Clusters", barmode="group", ) fig.show() # similar as for pca we see that the clusters are not very useful for seperating train and test after reduction by SVD # make a PCA reduction to 3 dimensions pca = PCA(n_components=3) train_embeddings_reduced = pca.fit_transform(train_embeddings) test_embeddings_reduced = pca.transform(test_embeddings) # shape train_embeddings_reduced.shape, test_embeddings_reduced.shape # label with train and test and plot train_labels = np.repeat("train", train_embeddings_reduced.shape[0]) test_labels = np.repeat("test", test_embeddings_reduced.shape[0]) embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_labels, test_labels]) df = pd.DataFrame(embeddings, columns=["x", "y", "z"]) df["labels"] = labels fig = px.scatter_3d( df, x="x", y="y", z="z", color="labels", title="T5 Embeddings Reduced to 3 Dimensions", opacity=0.2, ) fig.show()
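One check the notebook above never makes is how much variance the 2-D and 3-D projections actually retain, which matters when judging the scatter plots. The snippet below is a hedged sketch of that check, added for illustration; it assumes the train_embeddings array loaded above.
# Hedged sketch: explained variance of the leading principal components (assumes train_embeddings from above).
from sklearn.decomposition import PCA

pca_check = PCA(n_components=10, random_state=42).fit(train_embeddings)
ratios = pca_check.explained_variance_ratio_
print("per-component ratios:", ratios.round(3))
print("first 2 components retain:", ratios[:2].sum().round(3))
print("first 3 components retain:", ratios[:3].sum().round(3))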
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/526/129526286.ipynb
t5embeds
sergeifironov
[{"Id": 129526286, "ScriptId": 38514432, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11646918, "CreationDate": "05/14/2023 14:39:34", "VersionNumber": 1.0, "Title": "Basic EDA of T5 embeddings", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 198.0, "LinesInsertedFromPrevious": 198.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185673287, "KernelVersionId": 129526286, "SourceDatasetVersionId": 5499219}]
[{"Id": 5499219, "DatasetId": 3167603, "DatasourceVersionId": 5573606, "CreatorUserId": 71783, "LicenseName": "CC0: Public Domain", "CreationDate": "04/23/2023 17:52:38", "VersionNumber": 4.0, "Title": "T5_embeds", "Slug": "t5embeds", "Subtitle": NaN, "Description": NaN, "VersionNotes": "recalc test", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3167603, "CreatorUserId": 71783, "OwnerUserId": 71783.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5499219.0, "CurrentDatasourceVersionId": 5573606.0, "ForumId": 3231817, "Type": 2, "CreationDate": "04/22/2023 14:08:04", "LastActivityDate": "04/22/2023", "TotalViews": 6097, "TotalDownloads": 2258, "TotalVotes": 64, "TotalKernels": 107}]
[{"Id": 71783, "UserName": "sergeifironov", "DisplayName": "Sergei Fironov", "RegisterDate": "11/30/2012", "PerformanceTier": 4}]
# numpy for numerical computing import numpy as np # pandas for DataFrames import pandas as pd # plotly for visualization of the reduced embeddings import plotly.express as px # to calculate the PCAs from sklearn.decomposition import PCA # SVD for dimensionality reduction from sklearn.decomposition import TruncatedSVD # for K means clustering from sklearn.cluster import KMeans # path to the t5 embeddings train_embeddings_path = "/kaggle/input/t5embeds/train_embeds.npy" test_embeddings_path = "/kaggle/input/t5embeds/test_embeds.npy" train_ids_path = "/kaggle/input/t5embeds/train_ids.npy" test_ids_path = "/kaggle/input/t5embeds/test_ids.npy" # read the ids and embeddings into numpy arrays train_ids = np.load(train_ids_path) test_ids = np.load(test_ids_path) train_embeddings = np.load(train_embeddings_path) test_embeddings = np.load(test_embeddings_path) train_ids.shape, test_ids.shape, train_embeddings.shape, test_embeddings.shape # we want to reduce the embeddings to 2 dimensions for visualization pca = PCA(n_components=2) train_embeddings_reduced = pca.fit_transform(train_embeddings) test_embeddings_reduced = pca.transform(test_embeddings) # shape train_embeddings_reduced.shape, test_embeddings_reduced.shape # visualize the reduced embeddings # label train and test to distinguish them train_labels = np.repeat("train", train_embeddings_reduced.shape[0]) test_labels = np.repeat("test", test_embeddings_reduced.shape[0]) # combine the embeddings and labels embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_labels, test_labels]) # create a DataFrame df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels # plot fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.2, ) fig.show() # we have the GO annotations for the training set # read the annotations into a DataFrame train_annotations = pd.read_csv( "/kaggle/input/cafa-5-protein-function-prediction/Train/train_terms.tsv", sep="\t" ) # accumalate for each EntryID the GO terms in a list train_annotations = ( train_annotations.groupby("EntryID")["term"].apply(list).reset_index() ) train_annotations.head() # count the number of GO terms for each EntryID train_annotations["num_terms"] = train_annotations["term"].apply(lambda x: len(x)) train_annotations.head() # make a dictonary using EntryID and num_terms train_annotations_dict = dict( zip(train_annotations["EntryID"], train_annotations["num_terms"]) ) # map the dictonary to the train_ids and use 0 as default value for train -1 for test # this will be used to color the plot train_labels = np.array([train_annotations_dict.get(x, 0) for x in train_ids]) test_labels = np.repeat(-1, test_ids.shape[0]) # combine the embeddings and labels embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_labels, test_labels]) # create a DataFrame df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels # plot, use a rainbow color scheme fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.2, color_continuous_scale="rainbow", ) fig.show() # determine the most frequent GO terms # flatten the list of lists terms = [item for sublist in train_annotations["term"].values for item in sublist] # count the number of occurences for each GO term term_counts = pd.Series(terms).value_counts() # make a boolean column for the annotations and see if the term is in the top 1000 
train_annotations["top_1000"] = train_annotations["term"].apply( lambda x: any([term in x for term in term_counts.index[:1000]]) ) train_annotations.head() # use the top 1000 terms to color the plot # make a dictonary using EntryID and top_1000 train_annotations_dict = dict( zip(train_annotations["EntryID"], train_annotations["top_1000"]) ) # map the dictonary, use -1 as default value train_labels = np.array([train_annotations_dict.get(x, -1) for x in train_ids]) test_labels = np.repeat(-1, test_ids.shape[0]) # combine the embeddings and labels embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_labels, test_labels]) # create a DataFrame df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels # plot, use different color for each label fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.1, color_discrete_sequence=px.colors.qualitative.Pastel, ) fig.show() # use SVD to reduce the embeddings to 2 dimensions svd = TruncatedSVD(n_components=2) train_embeddings_reduced = svd.fit_transform(train_embeddings) test_embeddings_reduced = svd.transform(test_embeddings) # shape train_embeddings_reduced.shape, test_embeddings_reduced.shape # visualize the reduced embeddings, labelling which is in train and which in test as above train_labels = np.repeat("train", train_embeddings_reduced.shape[0]) test_labels = np.repeat("test", test_embeddings_reduced.shape[0]) embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_labels, test_labels]) df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.2, ) fig.show() # reduce to 2 dimensions with PCA, cluster the reduced embeddings with k-means and color the plot with clusters as labels pca = PCA(n_components=2) train_embeddings_reduced = pca.fit_transform(train_embeddings) test_embeddings_reduced = pca.transform(test_embeddings) # cluster the reduced embeddings with k-means kmeans = KMeans(n_init=10, n_clusters=10, random_state=42) train_clusters = kmeans.fit_predict(train_embeddings_reduced) test_clusters = kmeans.predict(test_embeddings_reduced) # combine the embeddings and labels embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_clusters, test_clusters]) # create a DataFrame df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels # plot fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.2, ) fig.show() # count how many entries of train and test are in each cluster train_clusters_counts = pd.Series(train_clusters).value_counts() test_clusters_counts = pd.Series(test_clusters).value_counts() # visualize the distribution of train and test entries in the clusters seperately # make a DataFrame df = pd.DataFrame({"train": train_clusters_counts, "test": test_clusters_counts}) # plot fig = px.bar( df, x=df.index, y=["train", "test"], title="Distribution of Train and Test Entries in the Clusters", barmode="group", ) fig.show() # it seems that the clusters are not very useful for seperating train and test after reduction by PCA # same as above but with SVD svd = TruncatedSVD(n_components=2) train_embeddings_reduced = svd.fit_transform(train_embeddings) test_embeddings_reduced = svd.transform(test_embeddings) # cluster the 
reduced embeddings with k-means kmeans = KMeans(n_init=10, n_clusters=10, random_state=42) train_clusters = kmeans.fit_predict(train_embeddings_reduced) test_clusters = kmeans.predict(test_embeddings_reduced) # combine the embeddings and labels embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_clusters, test_clusters]) # create a DataFrame df = pd.DataFrame(embeddings, columns=["x", "y"]) df["labels"] = labels # plot fig = px.scatter( df, x="x", y="y", color="labels", title="T5 Embeddings Reduced to 2 Dimensions", opacity=0.2, ) fig.show() # count how many entries of train and test are in each cluster train_clusters_counts = pd.Series(train_clusters).value_counts() test_clusters_counts = pd.Series(test_clusters).value_counts() # visualize the distribution of train and test entries in the clusters seperately # make a DataFrame df = pd.DataFrame({"train": train_clusters_counts, "test": test_clusters_counts}) # plot fig = px.bar( df, x=df.index, y=["train", "test"], title="Distribution of Train and Test Entries in the Clusters", barmode="group", ) fig.show() # similar as for pca we see that the clusters are not very useful for seperating train and test after reduction by SVD # make a PCA reduction to 3 dimensions pca = PCA(n_components=3) train_embeddings_reduced = pca.fit_transform(train_embeddings) test_embeddings_reduced = pca.transform(test_embeddings) # shape train_embeddings_reduced.shape, test_embeddings_reduced.shape # label with train and test and plot train_labels = np.repeat("train", train_embeddings_reduced.shape[0]) test_labels = np.repeat("test", test_embeddings_reduced.shape[0]) embeddings = np.concatenate([train_embeddings_reduced, test_embeddings_reduced]) labels = np.concatenate([train_labels, test_labels]) df = pd.DataFrame(embeddings, columns=["x", "y", "z"]) df["labels"] = labels fig = px.scatter_3d( df, x="x", y="y", z="z", color="labels", title="T5 Embeddings Reduced to 3 Dimensions", opacity=0.2, ) fig.show()
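# A quick sanity check on the plots above: how much variance do the leading principal components actually retain? The snippet below is a minimal sketch, assuming the `train_embeddings` array loaded above is still in memory; the choice of 50 components is an arbitrary illustrative value, not part of the original notebook.
import numpy as np
from sklearn.decomposition import PCA

# Fit a wider PCA once and inspect the cumulative explained variance ratio,
# to judge how faithful the 2-D scatter plots are to the full embeddings.
pca_check = PCA(n_components=50)  # 50 is only an illustrative choice
pca_check.fit(train_embeddings)
cumulative = np.cumsum(pca_check.explained_variance_ratio_)
print("variance kept by 2 components :", cumulative[1])
print("variance kept by 10 components:", cumulative[9])
print("variance kept by 50 components:", cumulative[49])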
false
0
2,773
0
2,795
2,773
129551175
# import the libraries import pandas as pd import numpy as np import json import requests import glob from bs4 import BeautifulSoup as bs from docx import Document import codecs import warnings warnings.filterwarnings("ignore") # ## 1.1 Data parsing # ### 1.1.1 Parsing from docx path = r"Condidates.docx" doc = Document(path) parag = doc.paragraphs company_dox = [] for para in parag: if para.text != "": company_dox.append(para.text.strip()) company_dox # ### 1.1.2 Parsing from JSON path = r"Data\*.json" result_list_JSON = {"name": [], "desc": [], "rating": [], "date": [], "text": []} company_json = [] # Use glob to get the list of files matching the pattern json_files = glob.glob(path) # Loop over the JSON files to extract the data for jsonfile in json_files: # Read the JSON file and convert the data into a Python dictionary with codecs.open(jsonfile, "r", encoding="utf-8") as f: data = json.load(f) company_json.append( jsonfile.replace("C:\\Users\\МОиБД\\Downloads\\Data\\", "")[:-5] ) for item in data["refs"]: if item != None: try: if data["info"] != None: result_list_JSON["desc"].append(data["info"]["about"]) result_list_JSON["rating"].append(data["info"]["rate"]) result_list_JSON["name"].append( jsonfile.replace("C:\\Users\\МОиБД\\Downloads\\Data\\", "")[ :-5 ] ) result_list_JSON["date"].append( item[1]["day"] + " " + item[1]["month"] ) result_list_JSON["text"].append(item[0]) else: result_list_JSON["desc"].append(data["info"]) result_list_JSON["rating"].append(data["info"]) result_list_JSON["name"].append( jsonfile.replace("C:\\Users\\МОиБД\\Downloads\\Data\\", "")[ :-5 ] ) result_list_JSON["date"].append( item[1]["day"] + " " + item[1]["month"] ) result_list_JSON["text"].append(item[0]) except: continue # __Create a DataFrame__ df_json = pd.DataFrame.from_dict(result_list_JSON, orient="index") df_json = df_json.transpose() df_json df_json.shape df_json.isna().sum() # ### 1.1.3 Parsing from Habr # __Connect and check the connection__ url = "https://habr.com/ru/search/" page = requests.get(url) page.status_code # The request returned status code '200', which means the connection was established successfully. 
urls = [] for company in company_dox: urls.append( f"https://habr.com/ru/search/?q={company}&target_type=companies&order=relevance" ) # create a dictionary in which the collected data will be stored result_list = { "name": [], "desc": [], "scope": [], "rating": [], "date": [], "text": [], } for url in urls: pagenum = 1 url = url.replace(" ", "%20") response = requests.get(url) soup = bs(response.content, "html.parser") company = soup.find( "div", {"class": "tm-search-companies__item tm-search-companies__item_inlined"} ) if company != None: # company name name = company.find("a", {"class": "tm-company-snippet__title"}).text.strip() if name not in company_dox: continue # company description desc = company.find( "div", {"class": "tm-company-snippet__description"} ).text.strip() # rating rating = ( company.find( "span", { "class": "tm-search-companies__score-counter tm-search-companies__score-counter_rating" }, ) .text.split("\n")[1] .strip() ) # go to the company profile page url = "https://habr.com" + str( company.find("a", {"class": "tm-company-snippet__title"}).get("href") ) page = requests.get(url) soup = bs(page.text, "html.parser") # find the company's fields of activity scopes = soup.find("div", {"class": "tm-company-profile__categories"}) # build a list of the company's fields of activity scopes_list = [] for scope in scopes: scopes_list.append(scope.text.strip()) company_link = str( soup.find("a", {"class": "tm-tabs__tab-link tm-tabs__tab-link"}).get("href") ) while True: # go to the company's articles page url = "https://habr.com" + company_link + f"page{pagenum}" print(url) page = requests.get(url) soup = bs(page.text, "html.parser") articles = soup.find_all("a", {"class": "tm-title__link"}) if articles == []: break else: for article in articles: # go to the article page url = "https://habr.com" + str(article.get("href")) page = requests.get(url) soup = bs(page.text, "html.parser") # find the article text divText = soup.find("div", {"class": "tm-article-body"}) try: result_list["text"].append(divText.text.strip()) # write the data collected so far into the dictionary result_list["name"].append(name) result_list["desc"].append(desc) result_list["rating"].append(rating) result_list["scope"].append(scopes_list) # find and record the article's publication date result_list["date"].append( soup.find( "span", {"class": "tm-article-datetime-published"} ).text.strip() ) except Exception as err: print(err) pagenum = pagenum + 1 # __Print the resulting dictionary__ result_list # __Create a DataFrame and display it__ df = pd.DataFrame(result_list) df # __Save the resulting DF in .csv format__ df.to_csv("HabrPars.csv", index=False) df = pd.read_csv("HabrPars.csv") df # __Concatenate the resulting DFs__ df = pd.concat([df, df_json]) df = df.reset_index(drop=True) df # __Save the resulting DF to .csv__ df.to_csv("HabrParsAndJSON.csv", index=False) df = pd.read_csv("HabrParsAndJSON.csv") df # ## 1.2 Building the dataset structure # __The 'desc' and 'scope' columns are useless for determining the cluster. Drop them__ new_df = df.drop(["desc", "scope"], axis=1) new_df # __Fill the missing rating values with the median__ new_df["rating"].fillna(new_df["rating"].median(), inplace=True) new_df.isna().sum() new_df # ## 1.3 Text data preprocessing # Text data preprocessing means converting text into a form that can be easily understood and processed by a computer. 
There are several text preprocessing techniques: tokenization, data cleaning (removing extraneous characters), stop-word removal, lemmatization, stemming and vectorization. # Here is why these techniques are worth applying: # Better result quality: text preprocessing can raise the accuracy of machine-learning algorithms that work with text and lead to more precise results. This is especially important for tasks such as classification and clustering of information. # Faster algorithms: preprocessing can speed up the algorithms that process text data. Techniques such as TF-IDF vectorization make it possible to quickly build data matrices that a machine-learning algorithm can use for classification or clustering of text data. # Text unification: bringing texts to a single format with techniques such as tokenization and lemmatization ensures that the texts are unified. This helps avoid data-mismatch problems and makes the results more reliable. # Less noise in the data: techniques such as stop-word removal and data cleaning help reduce noise in the data. This improves the accuracy and reliability of text-analysis results, since the text sample will contain fewer errors and less irrelevant information. # import the libraries import string import re import nltk import pymorphy2 from nltk.corpus import stopwords nltk.download("punkt") nltk.download("word_tokenize") from nltk.tokenize import word_tokenize from nltk.stem import SnowballStemmer from nltk.stem import WordNetLemmatizer morph = pymorphy2.MorphAnalyzer() # remove characters that are not letters def remove_notalpha(text): return "".join([i if i.isalpha() else " " for i in text]) # remove Latin characters def remove_latin(text): return re.sub("[a-z]", "", text, flags=re.I) # remove extra whitespace def remove_space(text): return re.sub(r"\s+", " ", text, flags=re.I) # Get the stop words from nltk stopword = nltk.corpus.stopwords.words("russian") stopword # tokenization def tokenize(text): t = word_tokenize(text) return [token for token in t if token not in stopword] # Lemmatization def lemmatize(text): res = list() for word in text: p = morph.parse(word)[0] res.append(p.normal_form) return res # __Apply all of the methods described above to the 'text' column__ prep_text = [ lemmatize(tokenize(remove_space(remove_latin(remove_notalpha(text.lower()))))) for text in new_df["text"] ] # __Add a new 'prep_text' column and store the processed data there__ new_df["prep_text"] = prep_text new_df # stop-word removal def remove_stopwords(text): return [word for word in text if word not in stopword] # __Remove stop words and update the data in the 'prep_text' column__ prep_text = remove_stopwords(new_df["prep_text"]) new_df["prep_text"] = prep_text new_df # __Extract parts of speech__ nltk.download("averaged_perceptron_tagger_ru") pos = new_df["prep_text"].apply(lambda x: nltk.pos_tag(x, lang="rus")) new_df["PoS"] = pos new_df # ## 1.4 Keyword/n-gram extraction. Text vectorization # __Selected algorithms:__ # __Bag of words__ is a text-processing method in which texts are represented as a collection of words while the order in which the words occur is ignored. Each document is treated as a "bag" of words, where every word in the document is represented as a separate token. 
# __TF-IDF__ is a method for estimating the importance of a word in a text that takes into account how often the word occurs in the document and in the corpus of text documents. # ### Bag of words # Convert each element of the 'prep_text' attribute to a string new_df["prep_text"] = new_df["prep_text"].apply(lambda x: " ".join(x)) from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer( analyzer="word", stop_words=stopwords.words("russian"), ngram_range=(1, 3), min_df=2 ) count_matrix = vectorizer.fit_transform(new_df["prep_text"]) count_matrix.shape vectorizer.get_feature_names_out()[:50] # ### TF-IDF from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer( max_df=0.8, max_features=10000, min_df=0.01, stop_words=stopwords.words("russian"), ngram_range=(1, 3), ) tfidf_matrix = vectorizer.fit_transform(new_df["prep_text"]) tfidf_matrix.shape vectorizer.get_feature_names_out()[:50] # __Unigrams__ ngram_vector = TfidfVectorizer( stop_words=stopwords.words("russian"), ngram_range=(1, 1) ) for i in range(len(new_df)): if len(new_df.loc[i, "prep_text"]) < 10: continue ngrams = ngram_vector.fit_transform([new_df.loc[i, "prep_text"]]).toarray() new_df.loc[i, "1gram"] = str( dict(zip(ngram_vector.get_feature_names_out(), ngrams[0])) ) # __Bigrams__ ngram_vector = TfidfVectorizer( stop_words=stopwords.words("russian"), ngram_range=(2, 2) ) for i in range(len(new_df)): if len(new_df.loc[i, "prep_text"]) < 10: continue ngrams = ngram_vector.fit_transform([new_df.loc[i, "prep_text"]]).toarray() new_df.loc[i, "2gram"] = str( dict(zip(ngram_vector.get_feature_names_out(), ngrams[0])) ) # __Trigrams__ ngram_vector = TfidfVectorizer( stop_words=stopwords.words("russian"), ngram_range=(3, 3) ) for i in range(len(new_df)): if len(new_df.loc[i, "prep_text"]) < 10: continue ngrams = ngram_vector.fit_transform([new_df.loc[i, "prep_text"]]).toarray() new_df.loc[i, "3gram"] = str( dict(zip(ngram_vector.get_feature_names_out(), ngrams[0])) ) new_df.isna().sum() new_df = new_df.dropna() new_df # ## 1.5 Exploratory analysis # __Create a DF with the target variable__ # Read the data from the JSON file with open("Target.json", "r", encoding="utf-8") as my_file: # open target_json = my_file.read() # read targets = json.loads(target_json) # load company_name = [] nominant = [] for i in range(len(targets["target"])): company_name.append(targets["target"][i]["Сompany"]) nominant.append(targets["target"][i]["Nominations"]) targets = pd.DataFrame() targets["company_name"] = company_name targets["nomination"] = nominant targets # __Add the target variable to the main dataset__ new_df = new_df.reset_index(drop=True) new_target = [] for i in range(len(new_df["name"])): try: k = 0 for j in range(len(targets["company_name"])): if ( new_df["name"][i].lower().strip() == targets["company_name"][j].lower().strip() ): new_target.append(targets["nomination"][j]) else: k = k + 1 if k == len(targets["company_name"]): new_target.append("Нет номинации") except: print(i) continue len(new_target) new_df["target"] = new_target new_df.head() new_df.isna().sum() # Encode the nominations import pylab import scipy.stats as stats from sklearn import preprocessing label_encoder = preprocessing.LabelEncoder() new_df["target_code"] = label_encoder.fit_transform(new_df["target"]) new_df new_df.to_csv("dataset.csv") # __Distribution plot of the target variable__ import seaborn as sns sns_plot = sns.distplot(new_df["target_code"]) fig = sns_plot.get_figure() sns_plot = sns.boxplot(new_df["target_code"]) fig = 
sns_plot.get_figure() # __Statistical assessment of the target variable's distribution using the Shapiro-Wilk test__ from scipy.stats import shapiro stat, pvalue = shapiro(new_df["target_code"]) print(stat, pvalue) if pvalue > 0.05: print("Normal distribution") else: print("Non-normal distribution") # The distribution of the target variable is not normal. # __Dependence of the topic on time-related features__ new_df["date"] # Convert the date to the required format (datetime was not imported above, so import it here). import datetime datetime.datetime.strptime(new_df["date"][2], "%d %b %Y в %H:%M") # __Dependence of the topic on the rating__ sns.set(rc={"figure.figsize": (20, 10)}) sns.scatterplot(data=new_df, x="target", y="rating") # __Number of publications by rating__ sns.set(rc={"figure.figsize": (20, 10)}) sns.countplot(x=new_df["rating"])
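# A note on the date conversion above: strptime with "%b" depends on the current locale, so abbreviated Russian month names scraped from Habr (e.g. "14 мая 2023 в 18:50") will usually not parse out of the box. Below is a minimal, locale-independent sketch; the month mapping, the helper name parse_habr_date and the sample string are illustrative assumptions, not part of the original notebook.
import re
import datetime

# Illustrative mapping from the first three letters of Russian month names to month numbers.
RU_MONTHS = {
    "янв": 1, "фев": 2, "мар": 3, "апр": 4, "мая": 5, "май": 5, "июн": 6,
    "июл": 7, "авг": 8, "сен": 9, "окт": 10, "ноя": 11, "дек": 12,
}

def parse_habr_date(s):
    # Expected shape: "14 мая 2023 в 18:50" (an assumption based on the format string used above).
    m = re.match(r"(\d{1,2})\s+(\S+)\s+(\d{4})\s+в\s+(\d{1,2}):(\d{2})", s)
    if m is None:
        return None
    day, month_word, year, hour, minute = m.groups()
    month = RU_MONTHS.get(month_word.lower()[:3])
    if month is None:
        return None
    return datetime.datetime(int(year), month, int(day), int(hour), int(minute))

print(parse_habr_date("14 мая 2023 в 18:50"))  # -> 2023-05-14 18:50:00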
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/551/129551175.ipynb
null
null
[{"Id": 129551175, "ScriptId": 38518830, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13417746, "CreationDate": "05/14/2023 18:50:15", "VersionNumber": 2.0, "Title": "Report1-GM-djostit", "EvaluationDate": "05/14/2023", "IsChange": false, "TotalLines": 505.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 505.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
5,295
0
5,295
5,295
129551823
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") df grek = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") grek.head() grek.shape grek.isna().sum() grek["Alpha"].unique() grek = grek.drop(columns="Epsilon", axis=1) grek grek.drop(columns="Id", axis=1, inplace=True) grek df.drop(columns="Id", axis=1, inplace=True) df df.isna().sum() df.dtypes import seaborn as sns df["Class"].value_counts() df["EJ"].value_counts() maj = {"A": 0, "B": 1} df["EJ"] = df["EJ"].map(maj) df.columns df.head() x = df.drop(columns="Class", axis=1) y = df["Class"] from sklearn.preprocessing import MinMaxScaler sc = MinMaxScaler() lis = [ "AB", "AF", "AH", "AM", "AR", "AX", "AY", "AZ", "BC", "BD ", "BN", "BP", "BQ", "BR", "BZ", "CB", "CC", "CD ", "CF", "CH", "CL", "CR", "CS", "CU", "CW ", "DA", "DE", "DF", "DH", "DI", "DL", "DN", "DU", "DV", "DY", "EB", "EE", "EG", "EH", "EJ", "EL", "EP", "EU", "FC", "FD ", "FE", "FI", "FL", "FR", "FS", "GB", "GE", "GF", "GH", "GI", "GL", ] for i in lis: x[i] = x[i].fillna(x[i].mean()) x.isna().sum() xx = pd.DataFrame(sc.fit_transform(x), columns=x.columns) xx xx.isna().sum() from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split( xx, y, test_size=0.35, random_state=42 ) from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr.fit(x_train, y_train) import xgboost classifier = xgboost.XGBClassifier() classifier.fit(x_train, y_train) params = { "learning_rate": [0.05, 0.10, 0.15, 0.20, 0.25, 0.30], "max_depth": [3, 4, 5, 6, 8, 10, 12, 15], "min_child_weight": [1, 3, 5, 7, 9], "gamma": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1], "colsample_bytree": [0.3, 0.4, 0.5, 0.7, 0.8, 0.9], } from sklearn.model_selection import RandomizedSearchCV, GridSearchCV random_search = RandomizedSearchCV( classifier, param_distributions=params, n_iter=10, scoring="f1", n_jobs=-1, cv=5, verbose=3, ) random_search.fit(x_train, y_train) random_search.best_estimator_ from sklearn.metrics import accuracy_score acc2 = accuracy_score(random_search.predict(x_train), y_train) print(acc2) acc3 = accuracy_score(random_search.predict(x_test), y_test) print(acc3) from sklearn.metrics import accuracy_score acc = accuracy_score(lr.predict(x_train), y_train) print(acc) acc1 = accuracy_score(lr.predict(x_test), y_test) print(acc1) test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") test test.dtypes test.drop(columns="Id", axis=1, inplace=True) test lis = [ "AB", "AF", "AH", "AM", "AR", "AX", "AY", "AZ", "BC", "BD ", "BN", "BP", "BQ", "BR", "BZ", "CB", "CC", "CD ", "CF", "CH", "CL", "CR", "CS", "CU", "CW ", "DA", "DE", "DF", "DH", "DI", "DL", "DN", "DU", "DV", "DY", "EB", "EE", "EG", "EH", "EJ", "EL", "EP", "EU", "FC", "FD ", "FE", "FI", "FL", "FR", "FS", "GB", "GE", "GF", "GH", 
"GI", "GL", ] for i in lis: if i != "EJ": test[i] = test[i].fillna(test[i].mean()) else: test[i] = test[i].fillna(test[i].mode()[0]) maj = {"A": 0, "B": 1} test["EJ"] = test["EJ"].map(maj) test test.dtypes tests = pd.DataFrame(sc.fit_transform(test), columns=test.columns) pred = random_search.predict_proba(test) pred pred clas0 = [] clas1 = [] for i in range(2): for j in range(len(pred)): if i == 0: clas0.append(pred[j][i]) else: clas1.append(pred[j][i]) sub = pd.read_csv( "/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv" ) subm = pd.DataFrame({"class_0": clas0, "class_1": clas1}, index=sub.Id) submission_df = subm.reset_index() submission_df.head() submission_df.to_csv("submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/551/129551823.ipynb
null
null
[{"Id": 129551823, "ScriptId": 38522395, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10250857, "CreationDate": "05/14/2023 18:57:53", "VersionNumber": 2.0, "Title": "notebook34acee4379", "EvaluationDate": "05/14/2023", "IsChange": false, "TotalLines": 172.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 172.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,855
0
1,855
1,855
129551977
# # Introduction # More than a hundred AI papers are published every day, making it exceedingly hard to keep up with current innovations. The goal of this competition is to tap into the diverse expertise of the Kaggle community to centralize and summarize the rapid advancements in AI from the past two years. The Kaggle community has a breadth and depth of AI experience which extends beyond the reach of any single individual or research group. We aim to share your collective perspective with the broader research community. # We have to write an essay on one of the following seven topics, with a prompt to describe what the community has learned over the past 2 years of working and experimenting with: # Text data # Image and/or video data # Tabular and/or time series data # Kaggle Competitions # Generative AI # AI ethics # Other (anything that does not fall into any other category) # From the Kaggle data collection, use any resources and also take part in a peer feedback process that involves reviewing three peer essays. For reference, see this [notebook](https://www.kaggle.com/code/docxian/kaggle-ai-report-access-to-data). Please upvote the referenced notebook as well. # Basic Imports import os import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Import plotting import matplotlib.pyplot as plt import seaborn as sns # Import Word cloud from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator # Import Json Lib import json # # Data Engineering # Load data from both sources, the arxiv metadata JSON file and the Kaggle writeups CSV. Explore, manage, and modify them. for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # # Arxiv Metadata # import arxiv meta data from JSON file arxiv_json_file = "../input/2023-kaggle-ai-report/arxiv_metadata_20230510.json" dict_arxiv = [] for line in open(arxiv_json_file, "r"): dict_arxiv.append(json.loads(line)) # convert to data frame df_arxiv_metadata = pd.DataFrame.from_dict(dict_arxiv) # clean up del dict_arxiv # Arxiv metadata detail info df_arxiv_metadata.info() # Overview of Arxiv metadata df_arxiv_metadata.head() # Arxiv Metadata Word Cloud category_text_arxiv = " ".join( xx for xx in df_arxiv_metadata[df_arxiv_metadata.categories == "cs.AI"] .reset_index() .abstract ) # Stop Word for cloud stopwords_cloud = set(STOPWORDS) # Plot word cloud wordcloud = WordCloud( stopwords=stopwords_cloud, max_font_size=50, max_words=250, width=600, height=400, background_color="black", ).generate(category_text_arxiv) plt.figure(figsize=(12, 8)) plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show() # # Kaggle Writeups # import Kaggle Writeup data from files. 
df_kaggle_writeup = pd.read_csv( "../input/2023-kaggle-ai-report/kaggle_writeups_20230510.csv" ) # Get detailed info df_kaggle_writeup.info() # Overview of Kaggle Writeup data df_kaggle_writeup.head() # Kaggle Writeup - Title Of comptetion df_kaggle_writeup["Title of Competition"].value_counts() # Kaggle Writeup Word Cloud category_text_kaggle = " ".join( xx for xx in df_kaggle_writeup[ df_kaggle_writeup["Title of Competition"] == "Feedback Prize - English Language Learning" ].reset_index()["Title of Writeup"] ) # Stop Word for cloud stopwords_cloud = set(STOPWORDS) # Plot word cloud wordcloud = WordCloud( stopwords=stopwords_cloud, max_font_size=50, max_words=250, width=600, height=400, background_color="black", ).generate(category_text_kaggle) plt.figure(figsize=(12, 8)) plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") plt.show()
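# Word clouds give a quick visual impression; a more quantitative complement is to count the most frequent terms directly. The snippet below is a minimal sketch that assumes df_arxiv_metadata from above is still in memory; keeping only the 20 most frequent terms is an arbitrary illustrative choice.
from sklearn.feature_extraction.text import CountVectorizer

abstracts = df_arxiv_metadata.loc[
    df_arxiv_metadata.categories == "cs.AI", "abstract"
].fillna("")

vec = CountVectorizer(stop_words="english", max_features=20)
counts = vec.fit_transform(abstracts)

term_totals = counts.sum(axis=0).A1  # total occurrences of each retained term
for term, total in sorted(
    zip(vec.get_feature_names_out(), term_totals), key=lambda t: -t[1]
):
    print(term, int(total))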
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/551/129551977.ipynb
null
null
[{"Id": 129551977, "ScriptId": 38434196, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1415704, "CreationDate": "05/14/2023 18:59:23", "VersionNumber": 1.0, "Title": "Peeking to Kaggle AI Report - Access to data", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 100.0, "LinesInsertedFromPrevious": 100.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
false
0
1,107
2
1,107
1,107
129551186
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # ICR - Identifying Age-Related Conditions # ## Use Machine Learning to detect conditions with measurements of anonymous characteristics # ## Context # They say age is just a number but a whole host of health issues come with aging. From heart disease and dementia to hearing loss and arthritis, aging is a risk factor for numerous diseases and complications. The growing field of bioinformatics includes research into interventions that can help slow and reverse biological aging and prevent major age-related ailments. Data science could have a role to play in developing new methods to solve problems with diverse data, even if the number of samples is small. # Currently, models like XGBoost and random forest are used to predict medical conditions yet the models' performance is not good enough. Dealing with critical problems where lives are on the line, models need to make correct predictions reliably and consistently between different cases. # Founded in 2015, competition host InVitro Cell Research, LLC (ICR) is a privately funded company focused on regenerative and preventive personalized medicine. Their offices and labs in the greater New York City area offer state-of-the-art research space. InVitro Cell Research's Scientists are what set them apart, helping guide and defining their mission of researching how to repair aging people fast. # In this competition, you’ll work with measurements of health characteristic data to solve critical problems in bioinformatics. Based on minimal training, you’ll create a model to predict if a person has any of three medical conditions, with an aim to improve on existing methods. # You could help advance the growing field of bioinformatics and explore new methods to solve complex problems with diverse data. # ### Importing necessary libraries import warnings warnings.filterwarnings("ignore") import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px # ## 1. Data Understanding and inspection of missing and incompatible values # Loading training dataset df_train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") # Loading greeks dataset df_greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") # Inspecting the Training dataset df_train.info() # Inspecting the Greeks dataset df_greeks.info() # Also let us merge the two datasets to form a final master dataset containing all the necessary details df = pd.merge(df_train, df_greeks, on="Id") # Inspecting the data df.info() # We have therefore, columns Id,EJ and the greek columns with categorical values and the rest being numerical # Therefore, Class is our target variable. Also, it seems there are a few missing values in the training and master dataset. 
Let us handle these values # Checking the percentage of missing value by columns missing_values = df_train.isnull().mean() * 100 missing_values[missing_values > 0] # Checking the percentage of missing value by columns in the master data set too missing_values_2 = df.isnull().mean() * 100 missing_values_2[missing_values_2 > 0] # Thus, these are the columns that have missing values. Among them, BQ and EL are the highest with almost 9.7% values missing # # 2. EDA and Data correction # ### Handling missing data # Let us first check the distribution of the columns with null values # Printing missing columns missing_cols = missing_values_2[missing_values_2 > 0].index.to_list() missing_cols # Printing the distribution for BQ and EL multiple missing value columns df[missing_cols].hist(bins=100) plt.show() # For this, we can use KNN imputer for the following reasons. # Some Advantages of KNN # 1. Quick calculation time # # 2. Simple algorithm – to interpret # # 3. Versatile – useful for regression and classification # # 4. High accuracy – you do not need to compare with better-supervised learning models # # 5. No assumptions about data – no need to make additional assumptions, tune several parameters, or build a model. This makes it crucial in nonlinear data case. # from sklearn.impute import KNNImputer imputer01 = KNNImputer(n_neighbors=3) tr_data_01 = imputer01.fit_transform(df[missing_cols]) df[missing_cols] = tr_data_01 df_train[missing_cols] = tr_data_01 # Checking the null value distribution now # Checking the percentage of missing value by columns missing_values = df_train.isnull().mean() * 100 missing_values[missing_values > 0] # Checking the percentage of missing value by columns missing_values = df.isnull().mean() * 100 missing_values[missing_values > 0] # ### Checking the distribution of the target variable df["Class"].hist() # Plotting a pie chart to understand this better data = df["Class"].value_counts() fig = px.pie(data, values=data, names=data.index) fig.show() # Therefore, the dataset is highly imbalanced as the classes 1 and 0 are 17.5% and 82.5% of the dataset respectively # We will be handling this imbalance a little later # ### Univariate Analysis # We begin with setting up a column list # Setting up column list target_col = ["Class"] greek_cols = list(df_greeks.columns) id_col = ["Id"] cat_cols = ["EJ"] num_cols = [ col for col in df.columns if col not in greek_cols + cat_cols + target_col + id_col ] print(greek_cols + cat_cols + target_col + id_col) # Checking for categorical columns df[cat_cols[0]].hist() # Plotting a pie chart to understand this better data = df[cat_cols[0]].value_counts() fig = px.pie(data, values=data, names=data.index) fig.show() # Therefore, the values A and B can be mapped to 1 and 0 # Transforming EJ by mapping A and B to 1 and 0 respectively df[cat_cols[0]] = df[cat_cols[0]].map({"A": 1, "B": 0}) # Plotting a pie chart to understand this better data = df[cat_cols[0]].value_counts() fig = px.pie(data, values=data, names=data.index) fig.show() # Checking the distribution of numeric columns # Setting max column width pd.set_option("display.max_columns", 500) # Printing the description of numerical columns df.describe() # Going forward we would have to scale the numeric columns during model building # Plotting the distribution of numerical columns for i, col in enumerate(num_cols): plt.figure(i) # sns.boxplot(x=df[col]) sns.histplot(df, x=col, kde=True) # ### Bivariate and Multivariate Analysis # Plotting the distribution of numerical columns with the 
target column for i, col in enumerate(num_cols): plt.figure(i) sns.boxplot(data=df, x="Class", y=col) # Plotting the distribution of numerical columns with the EJ column for i, col in enumerate(num_cols): plt.figure(i) sns.boxplot(data=df, x="EJ", y=col) # We can see a lot of outliers for most of the numerical columns for both the distributions. # Let us now plot EJ against the Target # Plotting the distribution of EJ with the target column sns.boxplot(data=df, x="Class", y="EJ") # #### Checking for correlation amongst the columns through multivariate analysis # Displaying a pairplot with target variable and other numeric variables # print(num_cols + target_col) sns.pairplot(df[num_cols[:6] + target_col], hue="Class") sns.pairplot(df[num_cols[6:12] + target_col], hue="Class") sns.pairplot(df[num_cols[12:18] + target_col], hue="Class") sns.pairplot(df[num_cols[12:18] + target_col], hue="Class") sns.pairplot(df[num_cols[18:24] + target_col], hue="Class") sns.pairplot(df[num_cols[24:32] + target_col], hue="Class") sns.pairplot(df[num_cols[32:40] + target_col], hue="Class") sns.pairplot(df[num_cols[40:46] + target_col], hue="Class") sns.pairplot(df[num_cols[46:] + target_col], hue="Class") # Listing correlation between columns # Setting max column width pd.set_option("display.max_rows", None) # Getting the correlation between the different variables corr_mat = df[num_cols + cat_cols].corr(method="pearson") # Convert correlation matrix to 1-D Series and sort sorted_mat = corr_mat.unstack().sort_values(ascending=False) # Listing correlation values that are above 60% corr_data = sorted_mat[((sorted_mat >= 0.6) | (sorted_mat <= -0.6)) & (sorted_mat != 1)] corr_data # Getting the list of columns to be removed removal_cols = set([col[0] for col in corr_data.index.to_list()[::2]]) removal_cols # Creating working copy of the original dataframe data = df.copy() # Dropping the Greek columns data.drop(columns=greek_cols, axis=1, inplace=True) # Dropping the columns with high correlation data.drop(columns=removal_cols, axis=1, inplace=True) # Inspecting the data data.info() # Therefore we will list the final list of input and target columns final_cols = data.select_dtypes(include=["float64", "int64"]).columns final_cols # # 3. Preprocessing and Model Building # looking at the distribution of the values data.describe() # Based on the distribution, going forward we would have to scale the numeric columns # Removing Target column final_cols = final_cols.to_list() final_cols.remove("Class") num_cols = final_cols.copy() # Removing Categorical EJ column num_cols.remove("EJ") # Listing numeric columns num_cols # ### But before that, we need to handle the class imbalance of the target variable. 
As we have seen earlier, only 17.5% of the data is 1 # Getting train test split X = data[num_cols + cat_cols] y = data[target_col] from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler # Using ADASYN for resampling from imblearn.over_sampling import ADASYN adasyn = ADASYN() X, y = adasyn.fit_resample(X, y) # Plotting a pie chart to understand this better pie_data = y["Class"].value_counts() fig = px.pie(pie_data, values=pie_data, names=pie_data.index) fig.show() # We can thus see that the data is a lot more balanced # We now perform the train test split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=108 ) # Scaling the data scaler = StandardScaler() X_train_sc = scaler.fit_transform(X_train) X_test_sc = scaler.transform(X_test) # #### A. Baseline Model - Simple Logistic Regression from sklearn.linear_model import LogisticRegression lr0 = LogisticRegression() lr0.fit(X_train_sc, y_train) from sklearn.metrics import ( classification_report, accuracy_score, precision_score, recall_score, auc, roc_curve, ) y_train_pred = lr0.predict(X_train_sc) # Getting metrics for train data print(classification_report(y_train, y_train_pred)) print(accuracy_score(y_train, y_train_pred)) print(precision_score(y_train, y_train_pred)) print(recall_score(y_train, y_train_pred)) y_pred = lr0.predict(X_test_sc) # Getting metrics for test data print(classification_report(y_test, y_pred)) print(accuracy_score(y_test, y_pred)) print(precision_score(y_test, y_pred)) print(recall_score(y_test, y_pred)) # Thus, we see that the accuracy on both train and test data is very close # ##### Plotting ROC Curve pred = lr0.predict_proba(X_test_sc) fpr, tpr, thresholds = roc_curve(y_test, pred[:, 1], pos_label=1) plt.plot(fpr, tpr) # ##### Calculating ROC AUC Score from sklearn.metrics import roc_auc_score # auc scores auc_score1 = roc_auc_score(y_test, pred[:, 1]) print(auc_score1) # Checking the test data # Loading the test dataset df_test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") # Inspecting the data df_test.info() df_test.head() # Transforming EJ by mapping A and B to 1 and 0 respectively df_test["EJ"] = df_test["EJ"].map({"A": 1, "B": 0}) df_test["EJ"] # Creating a function to generate the CSV for prediction def predict_csv(df, model, scaler): X = df[num_cols + cat_cols] # Scaling all the numeric columns X_sc = scaler.transform(X) y_pred = model.predict(X_sc) pred = model.predict_proba(X_sc) print(pred) data = pd.DataFrame(columns=["Id", "class_0", "class_1"]) data["Id"] = df["Id"] data["class_0"] = pred[:, 0] data["class_1"] = pred[:, 1] print(data) data.to_csv("/kaggle/working/submission.csv", index=False) predict_csv(df_test, lr0, scaler)
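# One caveat about the resampling step above: ADASYN is applied before train_test_split, so synthetic points generated from neighbours of what later becomes the test split can leak into training and make the reported scores optimistic. Below is a minimal sketch of keeping the oversampling inside the training folds only, using imblearn's sampler-aware Pipeline; it assumes X and y refer to the original, un-resampled feature table and target (their values before the fit_resample call above).
from imblearn.pipeline import Pipeline as ImbPipeline
from imblearn.over_sampling import ADASYN
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold, cross_val_score

# Samplers inside an imblearn Pipeline are applied only when fitting,
# so each validation fold is scored on real, untouched data.
pipe = ImbPipeline(
    steps=[
        ("scale", StandardScaler()),
        ("adasyn", ADASYN(random_state=108)),
        ("clf", LogisticRegression(max_iter=1000)),
    ]
)
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=108)
scores = cross_val_score(pipe, X, y.values.ravel(), cv=cv, scoring="neg_log_loss")
print("cross-validated log loss:", -scores.mean())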
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/551/129551186.ipynb
null
null
[{"Id": 129551186, "ScriptId": 38468484, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2145674, "CreationDate": "05/14/2023 18:50:20", "VersionNumber": 3.0, "Title": "ICR_Submission_SVK", "EvaluationDate": "05/14/2023", "IsChange": false, "TotalLines": 402.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 402.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # ICR - Identifying Age-Related Conditions # ## Use Machine Learning to detect conditions with measurements of anonymous characteristics # ## Context # They say age is just a number but a whole host of health issues come with aging. From heart disease and dementia to hearing loss and arthritis, aging is a risk factor for numerous diseases and complications. The growing field of bioinformatics includes research into interventions that can help slow and reverse biological aging and prevent major age-related ailments. Data science could have a role to play in developing new methods to solve problems with diverse data, even if the number of samples is small. # Currently, models like XGBoost and random forest are used to predict medical conditions yet the models' performance is not good enough. Dealing with critical problems where lives are on the line, models need to make correct predictions reliably and consistently between different cases. # Founded in 2015, competition host InVitro Cell Research, LLC (ICR) is a privately funded company focused on regenerative and preventive personalized medicine. Their offices and labs in the greater New York City area offer state-of-the-art research space. InVitro Cell Research's Scientists are what set them apart, helping guide and defining their mission of researching how to repair aging people fast. # In this competition, you’ll work with measurements of health characteristic data to solve critical problems in bioinformatics. Based on minimal training, you’ll create a model to predict if a person has any of three medical conditions, with an aim to improve on existing methods. # You could help advance the growing field of bioinformatics and explore new methods to solve complex problems with diverse data. # ### Importing necessary libraries import warnings warnings.filterwarnings("ignore") import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px # ## 1. Data Understanding and inspection of missing and incompatible values # Loading training dataset df_train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") # Loading greeks dataset df_greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") # Inspecting the Training dataset df_train.info() # Inspecting the Greeks dataset df_greeks.info() # Also let us merge the two datasets to form a final master dataset containing all the necessary details df = pd.merge(df_train, df_greeks, on="Id") # Inspecting the data df.info() # We have therefore, columns Id,EJ and the greek columns with categorical values and the rest being numerical # Therefore, Class is our target variable. Also, it seems there are a few missing values in the training and master dataset. 
Let us handle these values # Checking the percentage of missing value by columns missing_values = df_train.isnull().mean() * 100 missing_values[missing_values > 0] # Checking the percentage of missing value by columns in the master data set too missing_values_2 = df.isnull().mean() * 100 missing_values_2[missing_values_2 > 0] # Thus, these are the columns that have missing values. Among them, BQ and EL are the highest with almost 9.7% values missing # # 2. EDA and Data correction # ### Handling missing data # Let us first check the distribution of the columns with null values # Printing missing columns missing_cols = missing_values_2[missing_values_2 > 0].index.to_list() missing_cols # Printing the distribution for BQ and EL multiple missing value columns df[missing_cols].hist(bins=100) plt.show() # For this, we can use KNN imputer for the following reasons. # Some Advantages of KNN # 1. Quick calculation time # # 2. Simple algorithm – to interpret # # 3. Versatile – useful for regression and classification # # 4. High accuracy – you do not need to compare with better-supervised learning models # # 5. No assumptions about data – no need to make additional assumptions, tune several parameters, or build a model. This makes it crucial in nonlinear data case. # from sklearn.impute import KNNImputer imputer01 = KNNImputer(n_neighbors=3) tr_data_01 = imputer01.fit_transform(df[missing_cols]) df[missing_cols] = tr_data_01 df_train[missing_cols] = tr_data_01 # Checking the null value distribution now # Checking the percentage of missing value by columns missing_values = df_train.isnull().mean() * 100 missing_values[missing_values > 0] # Checking the percentage of missing value by columns missing_values = df.isnull().mean() * 100 missing_values[missing_values > 0] # ### Checking the distribution of the target variable df["Class"].hist() # Plotting a pie chart to understand this better data = df["Class"].value_counts() fig = px.pie(data, values=data, names=data.index) fig.show() # Therefore, the dataset is highly imbalanced as the classes 1 and 0 are 17.5% and 82.5% of the dataset respectively # We will be handling this imbalance a little later # ### Univariate Analysis # We begin with setting up a column list # Setting up column list target_col = ["Class"] greek_cols = list(df_greeks.columns) id_col = ["Id"] cat_cols = ["EJ"] num_cols = [ col for col in df.columns if col not in greek_cols + cat_cols + target_col + id_col ] print(greek_cols + cat_cols + target_col + id_col) # Checking for categorical columns df[cat_cols[0]].hist() # Plotting a pie chart to understand this better data = df[cat_cols[0]].value_counts() fig = px.pie(data, values=data, names=data.index) fig.show() # Therefore, the values A and B can be mapped to 1 and 0 # Transforming EJ by mapping A and B to 1 and 0 respectively df[cat_cols[0]] = df[cat_cols[0]].map({"A": 1, "B": 0}) # Plotting a pie chart to understand this better data = df[cat_cols[0]].value_counts() fig = px.pie(data, values=data, names=data.index) fig.show() # Checking the distribution of numeric columns # Setting max column width pd.set_option("display.max_columns", 500) # Printing the description of numerical columns df.describe() # Going forward we would have to scale the numeric columns during model building # Plotting the distribution of numerical columns for i, col in enumerate(num_cols): plt.figure(i) # sns.boxplot(x=df[col]) sns.histplot(df, x=col, kde=True) # ### Bivariate and Multivariate Analysis # Plotting the distribution of numerical columns with the 
target column for i, col in enumerate(num_cols): plt.figure(i) sns.boxplot(data=df, x="Class", y=col) # Plotting the distribution of numerical columns with the EJ column for i, col in enumerate(num_cols): plt.figure(i) sns.boxplot(data=df, x="EJ", y=col) # We can see a lot of outliers for most of the numerical columns for both the distributions. # Let us now plot EJ against the Target # Plotting the distribution of EJ with the target column sns.boxplot(data=df, x="Class", y="EJ") # #### Checking for correlation amongst the columns through multivariate analysis # Displaying a pairplot with target variable and other numeric variables # print(num_cols + target_col) sns.pairplot(df[num_cols[:6] + target_col], hue="Class") sns.pairplot(df[num_cols[6:12] + target_col], hue="Class") sns.pairplot(df[num_cols[12:18] + target_col], hue="Class") sns.pairplot(df[num_cols[12:18] + target_col], hue="Class") sns.pairplot(df[num_cols[18:24] + target_col], hue="Class") sns.pairplot(df[num_cols[24:32] + target_col], hue="Class") sns.pairplot(df[num_cols[32:40] + target_col], hue="Class") sns.pairplot(df[num_cols[40:46] + target_col], hue="Class") sns.pairplot(df[num_cols[46:] + target_col], hue="Class") # Listing correlation between columns # Setting max column width pd.set_option("display.max_rows", None) # Getting the correlation between the different variables corr_mat = df[num_cols + cat_cols].corr(method="pearson") # Convert correlation matrix to 1-D Series and sort sorted_mat = corr_mat.unstack().sort_values(ascending=False) # Listing correlation values that are above 60% corr_data = sorted_mat[((sorted_mat >= 0.6) | (sorted_mat <= -0.6)) & (sorted_mat != 1)] corr_data # Getting the list of columns to be removed removal_cols = set([col[0] for col in corr_data.index.to_list()[::2]]) removal_cols # Creating working copy of the original dataframe data = df.copy() # Dropping the Greek columns data.drop(columns=greek_cols, axis=1, inplace=True) # Dropping the columns with high correlation data.drop(columns=removal_cols, axis=1, inplace=True) # Inspecting the data data.info() # Therefore we will list the final list of input and target columns final_cols = data.select_dtypes(include=["float64", "int64"]).columns final_cols # # 3. Preprocessing and Model Building # looking at the distribution of the values data.describe() # Based on the distribution, going forward we would have to scale the numeric columns # Removing Target column final_cols = final_cols.to_list() final_cols.remove("Class") num_cols = final_cols.copy() # Removing Categorical EJ column num_cols.remove("EJ") # Listing numeric columns num_cols # ### But before that, we need to handle the class imbalance of the target variable. 
As we have seen earlier, only 17.5% of the data is 1 # Getting train test split X = data[num_cols + cat_cols] y = data[target_col] from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler # Using ADASYN for resampling from imblearn.over_sampling import ADASYN adasyn = ADASYN() X, y = adasyn.fit_resample(X, y) # Plotting a pie chart to understand this better pie_data = y["Class"].value_counts() fig = px.pie(pie_data, values=pie_data, names=pie_data.index) fig.show() # We can thus see that the data is a lot more balanced # We now perform the train test split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=108 ) # Scaling the data scaler = StandardScaler() X_train_sc = scaler.fit_transform(X_train) X_test_sc = scaler.transform(X_test) # #### A. Baseline Model - Simple Logistic Regression from sklearn.linear_model import LogisticRegression lr0 = LogisticRegression() lr0.fit(X_train_sc, y_train) from sklearn.metrics import ( classification_report, accuracy_score, precision_score, recall_score, auc, roc_curve, ) y_train_pred = lr0.predict(X_train_sc) # Getting metrics for train data print(classification_report(y_train, y_train_pred)) print(accuracy_score(y_train, y_train_pred)) print(precision_score(y_train, y_train_pred)) print(recall_score(y_train, y_train_pred)) y_pred = lr0.predict(X_test_sc) # Getting metrics for train data print(classification_report(y_test, y_pred)) print(accuracy_score(y_test, y_pred)) print(precision_score(y_test, y_pred)) print(recall_score(y_test, y_pred)) # Thus, we see that the accuracy on both train and test data are very close # ##### Plotting ROC Curve pred = lr0.predict_proba(X_test_sc) fpr, tpr, thresholds = roc_curve(y_test, pred[:, 1], pos_label=1) plt.plot(fpr, tpr) # ##### Calculating ROC AUC Score from sklearn.metrics import roc_auc_score # auc scores auc_score1 = roc_auc_score(y_test, pred[:, 1]) print(auc_score1) # Checking the test data # Loading training dataset df_test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") # Inspecting the data df_test.info() df_test.head() # Transforming EJ by mapping A and B to 1 and 0 respectively df_test["EJ"] = df_test["EJ"].map({"A": 1, "B": 0}) df_test["EJ"] # Creating a function to generate the CSV for prediction def predict_csv(df, model, scaler): X = df[num_cols + cat_cols] # Scaling all the numeric columns X_sc = scaler.transform(X) y_pred = model.predict(X_sc) pred = model.predict_proba(X_sc) print(pred) data = pd.DataFrame(columns=["Id", "class_0", "class_1"]) data["Id"] = df["Id"] data["class_0"] = pred[:, 0] data["class_1"] = pred[:, 1] print(data) data.to_csv("/kaggle/working/submission.csv") predict_csv(df_test, lr0, scaler)
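# --- Added sketch (not part of the original notebook) ---
# removal_cols above is built by taking every other entry of the sorted pair list, which relies
# on the tie ordering of the mirrored (A, B)/(B, A) correlation pairs. A common, less
# order-dependent alternative (assuming the same df, num_cols and cat_cols as above) masks the
# upper triangle of the absolute correlation matrix and drops one column per correlated pair:
import numpy as np

corr_abs = df[num_cols + cat_cols].corr(method="pearson").abs()
upper = corr_abs.where(np.triu(np.ones(corr_abs.shape, dtype=bool), k=1))  # each pair appears once
to_drop = [c for c in upper.columns if (upper[c] >= 0.6).any()]
print(to_drop)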
false
0
3,781
0
3,781
3,781
129551675
<jupyter_start><jupyter_text>Customer Segmentation ### Context An automobile company has plans to enter new markets with their existing products (P1, P2, P3, P4 and P5). After intensive market research, they’ve deduced that the behavior of new market is similar to their existing market. ### Content In their existing market, the sales team has classified all customers into 4 segments (A, B, C, D ). Then, they performed segmented outreach and communication for different segment of customers. This strategy has work exceptionally well for them. They plan to use the same strategy on new markets and have identified 2627 new potential customers. You are required to help the manager to predict the right group of the new customers. Kaggle dataset identifier: customer <jupyter_code>import pandas as pd df = pd.read_csv('customer/Train.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 8068 entries, 0 to 8067 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 ID 8068 non-null int64 1 Gender 8068 non-null object 2 Ever_Married 7928 non-null object 3 Age 8068 non-null int64 4 Graduated 7990 non-null object 5 Profession 7944 non-null object 6 Work_Experience 7239 non-null float64 7 Spending_Score 8068 non-null object 8 Family_Size 7733 non-null float64 9 Var_1 7992 non-null object 10 Segmentation 8068 non-null object dtypes: float64(2), int64(2), object(7) memory usage: 693.5+ KB <jupyter_text>Examples: { "ID": 462809, "Gender": "Male", "Ever_Married": "No", "Age": 22, "Graduated": "No", "Profession": "Healthcare", "Work_Experience": 1.0, "Spending_Score": "Low", "Family_Size": 4, "Var_1": "Cat_4", "Segmentation": "D" } { "ID": 462643, "Gender": "Female", "Ever_Married": "Yes", "Age": 38, "Graduated": "Yes", "Profession": "Engineer", "Work_Experience": NaN, "Spending_Score": "Average", "Family_Size": 3, "Var_1": "Cat_4", "Segmentation": "A" } { "ID": 466315, "Gender": "Female", "Ever_Married": "Yes", "Age": 67, "Graduated": "Yes", "Profession": "Engineer", "Work_Experience": 1.0, "Spending_Score": "Low", "Family_Size": 1, "Var_1": "Cat_6", "Segmentation": "B" } { "ID": 461735, "Gender": "Male", "Ever_Married": "Yes", "Age": 67, "Graduated": "Yes", "Profession": "Lawyer", "Work_Experience": 0.0, "Spending_Score": "High", "Family_Size": 2, "Var_1": "Cat_6", "Segmentation": "B" } <jupyter_code>import pandas as pd df = pd.read_csv('customer/Test.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 2627 entries, 0 to 2626 Data columns (total 10 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 ID 2627 non-null int64 1 Gender 2627 non-null object 2 Ever_Married 2577 non-null object 3 Age 2627 non-null int64 4 Graduated 2603 non-null object 5 Profession 2589 non-null object 6 Work_Experience 2358 non-null float64 7 Spending_Score 2627 non-null object 8 Family_Size 2514 non-null float64 9 Var_1 2595 non-null object dtypes: float64(2), int64(2), object(6) memory usage: 205.4+ KB <jupyter_text>Examples: { "ID": 458989, "Gender": "Female", "Ever_Married": "Yes", "Age": 36, "Graduated": "Yes", "Profession": "Engineer", "Work_Experience": 0, "Spending_Score": "Low", "Family_Size": 1, "Var_1": "Cat_6" } { "ID": 458994, "Gender": "Male", "Ever_Married": "Yes", "Age": 37, "Graduated": "Yes", "Profession": "Healthcare", "Work_Experience": 8, "Spending_Score": "Average", "Family_Size": 4, "Var_1": "Cat_6" } { "ID": 458996, "Gender": "Female", "Ever_Married": "Yes", "Age": 69, "Graduated": "No", 
"Profession": null, "Work_Experience": 0, "Spending_Score": "Low", "Family_Size": 1, "Var_1": "Cat_6" } { "ID": 459000, "Gender": "Male", "Ever_Married": "Yes", "Age": 59, "Graduated": "No", "Profession": "Executive", "Work_Experience": 11, "Spending_Score": "High", "Family_Size": 2, "Var_1": "Cat_6" } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Step 1: Load Train Data df = pd.read_csv("/kaggle/input/customer/Train.csv") df.head() df.info() df["Segmentation"].value_counts() df["Profession"].value_counts() df1 = df.dropna() df1 = df1.drop(["ID"], axis=1) # # Step 2: OneHot Encoding from sklearn.preprocessing import LabelEncoder le = LabelEncoder() df1.head() col = ["Gender", "Ever_Married", "Graduated", "Profession", "Spending_Score", "Var_1"] for i in col: df1[i] = le.fit_transform(df1[i]) df1.head() df2 = df1.drop(["Segmentation"], axis=1) df1["Segmentation"] = df1["Segmentation"].apply( lambda x: 0 if x == "A" else 1 if x == "B" else 2 if x == "C" else 3 ) # # Step 3: Train_Test_Split X = df2 X.head() y = df1["Segmentation"] y.head() from sklearn.metrics import confusion_matrix, accuracy_score from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # # Step 4: Buil Model Classification # # 1. Decision Tree # from sklearn.tree import DecisionTreeClassifier tree = DecisionTreeClassifier(criterion="entropy", random_state=0) tree.fit(X_train, y_train) accuracy_score(y_test, tree.predict(X_test)) # # 2. Random Forest from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier() model.fit(X_train, y_train) accuracy_score(y_test, model.predict(X_test)) # # 3. Support Vector Machine from sklearn import svm clf = svm.SVC() clf.fit(X_train, y_train) accuracy_score(y_test, clf.predict(X_test)) # # 4. Naive Bayes from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() gnb.fit(X_train, y_train) accuracy_score(y_test, gnb.predict(X_test)) # # 5. 
Logistic Regression from sklearn.linear_model import LogisticRegression lgt = LogisticRegression() lgt.fit(X_train, y_train) accuracy_score(y_test, lgt.predict(X_test)) # # Step 5: Load Test Data data = pd.read_csv("/kaggle/input/customer/Test.csv") data.head() data.isnull().sum() # # Step 6: Handle Missing Data and OneHot Encoding data["Ever_Married"] = data["Ever_Married"].fillna(method="bfill") data["Graduated"] = data["Graduated"].fillna(method="bfill") data["Profession"] = data["Profession"].fillna(method="bfill") data["Family_Size"] = data["Family_Size"].fillna(method="bfill") data["Work_Experience"] = data["Work_Experience"].fillna(value=3) data["Var_1"] = data["Var_1"].fillna(method="bfill") data.head() data.isnull().sum() data1 = data.drop(["ID"], axis=1) columns = [ "Gender", "Ever_Married", "Graduated", "Profession", "Spending_Score", "Var_1", ] for i in columns: data1[i] = le.fit_transform(data1[i]) data1.head() # # Step 7: Predict Customer Segmentation Class Using Logistic Regression lgt.predict(data1) Result = pd.DataFrame(data["ID"]) Result["Segmentation_Predict"] = lgt.predict(data1) Result["Segmentation_Predict"] = Result["Segmentation_Predict"].apply( lambda x: "A" if x == 0 else "B" if x == 1 else "C" if x == 2 else "D" ) Result.head() Result.to_csv("/kaggle/working/output.csv", index=False)
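# --- Added sketch (not part of the original notebook) ---
# Step 2 fits LabelEncoder on the training columns and Step 6 refits it on the test columns, so
# the category-to-integer mapping is not guaranteed to match between the two frames (and note
# this is ordinal label encoding rather than one-hot). One pattern that keeps the mappings
# aligned, shown on hypothetical fresh copies of the two CSVs:
import pandas as pd
from sklearn.preprocessing import LabelEncoder

train_raw = pd.read_csv("/kaggle/input/customer/Train.csv").dropna()
test_raw = pd.read_csv("/kaggle/input/customer/Test.csv")
cat_columns = ["Gender", "Ever_Married", "Graduated", "Profession", "Spending_Score", "Var_1"]
test_raw[cat_columns] = test_raw[cat_columns].bfill()  # same backfill idea as Step 6
encoders = {c: LabelEncoder().fit(train_raw[c]) for c in cat_columns}  # fit on train only
for c in cat_columns:
    train_raw[c] = encoders[c].transform(train_raw[c])
    test_raw[c] = encoders[c].transform(test_raw[c])  # reuse the training mapping
# note: transform() assumes every test category (and no remaining NaN) was seen in training;
# unseen values would need explicit handling.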
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/551/129551675.ipynb
customer
vetrirah
[{"Id": 129551675, "ScriptId": 38507223, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10273300, "CreationDate": "05/14/2023 18:56:03", "VersionNumber": 2.0, "Title": "notebooka21855ce86", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 155.0, "LinesInsertedFromPrevious": 63.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 92.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185728022, "KernelVersionId": 129551675, "SourceDatasetVersionId": 1447480}]
[{"Id": 1447480, "DatasetId": 848479, "DatasourceVersionId": 1480992, "CreatorUserId": 1200650, "LicenseName": "CC0: Public Domain", "CreationDate": "08/28/2020 11:19:39", "VersionNumber": 1.0, "Title": "Customer Segmentation", "Slug": "customer", "Subtitle": "AV - Janatahack : Customer Segmentation", "Description": "### Context\n\nAn automobile company has plans to enter new markets with their existing products (P1, P2, P3, P4 and P5). After intensive market research, they\u2019ve deduced that the behavior of new market is similar to their existing market. \n\n### Content\n\nIn their existing market, the sales team has classified all customers into 4 segments (A, B, C, D ). Then, they performed segmented outreach and communication for different segment of customers. This strategy has work exceptionally well for them. They plan to use the same strategy on new markets and have identified 2627 new potential customers. \n\nYou are required to help the manager to predict the right group of the new customers.\n\n\n### Acknowledgements\n\nhttps://datahack.analyticsvidhya.com/contest/janatahack-customer-segmentation/#ProblemStatement\n\n### Inspiration\n\nhttps://datahack.analyticsvidhya.com/contest/janatahack-customer-segmentation/#ProblemStatement", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 848479, "CreatorUserId": 1200650, "OwnerUserId": 1200650.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1447480.0, "CurrentDatasourceVersionId": 1480992.0, "ForumId": 863719, "Type": 2, "CreationDate": "08/28/2020 11:19:39", "LastActivityDate": "08/28/2020", "TotalViews": 89295, "TotalDownloads": 11459, "TotalVotes": 125, "TotalKernels": 46}]
[{"Id": 1200650, "UserName": "vetrirah", "DisplayName": "Vetrivel-PS", "RegisterDate": "08/04/2017", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Step 1: Load Train Data df = pd.read_csv("/kaggle/input/customer/Train.csv") df.head() df.info() df["Segmentation"].value_counts() df["Profession"].value_counts() df1 = df.dropna() df1 = df1.drop(["ID"], axis=1) # # Step 2: OneHot Encoding from sklearn.preprocessing import LabelEncoder le = LabelEncoder() df1.head() col = ["Gender", "Ever_Married", "Graduated", "Profession", "Spending_Score", "Var_1"] for i in col: df1[i] = le.fit_transform(df1[i]) df1.head() df2 = df1.drop(["Segmentation"], axis=1) df1["Segmentation"] = df1["Segmentation"].apply( lambda x: 0 if x == "A" else 1 if x == "B" else 2 if x == "C" else 3 ) # # Step 3: Train_Test_Split X = df2 X.head() y = df1["Segmentation"] y.head() from sklearn.metrics import confusion_matrix, accuracy_score from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # # Step 4: Buil Model Classification # # 1. Decision Tree # from sklearn.tree import DecisionTreeClassifier tree = DecisionTreeClassifier(criterion="entropy", random_state=0) tree.fit(X_train, y_train) accuracy_score(y_test, tree.predict(X_test)) # # 2. Random Forest from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier() model.fit(X_train, y_train) accuracy_score(y_test, model.predict(X_test)) # # 3. Support Vector Machine from sklearn import svm clf = svm.SVC() clf.fit(X_train, y_train) accuracy_score(y_test, clf.predict(X_test)) # # 4. Naive Bayes from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() gnb.fit(X_train, y_train) accuracy_score(y_test, gnb.predict(X_test)) # # 5. 
Logistic Regression from sklearn.linear_model import LogisticRegression lgt = LogisticRegression() lgt.fit(X_train, y_train) accuracy_score(y_test, lgt.predict(X_test)) # # Step 5: Load Test Data data = pd.read_csv("/kaggle/input/customer/Test.csv") data.head() data.isnull().sum() # # Step 6: Handle Missing Data and OneHot Encoding data["Ever_Married"] = data["Ever_Married"].fillna(method="bfill") data["Graduated"] = data["Graduated"].fillna(method="bfill") data["Profession"] = data["Profession"].fillna(method="bfill") data["Family_Size"] = data["Family_Size"].fillna(method="bfill") data["Work_Experience"] = data["Work_Experience"].fillna(value=3) data["Var_1"] = data["Var_1"].fillna(method="bfill") data.head() data.isnull().sum() data1 = data.drop(["ID"], axis=1) columns = [ "Gender", "Ever_Married", "Graduated", "Profession", "Spending_Score", "Var_1", ] for i in columns: data1[i] = le.fit_transform(data1[i]) data1.head() # # Step 7: Predict Customer Segmentation Class Using Logistic Regression lgt.predict(data1) Result = pd.DataFrame(data["ID"]) Result["Segmentation_Predict"] = lgt.predict(data1) Result["Segmentation_Predict"] = Result["Segmentation_Predict"].apply( lambda x: "A" if x == 0 else "B" if x == 1 else "C" if x == 2 else "D" ) Result.head() Result.to_csv("/kaggle/working/output.csv", index=False)
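# --- Added sketch (not part of the original notebook) ---
# The five classifiers above are compared on a single 80/20 split; cross-validation gives a
# less split-dependent comparison. Assuming the same X and y built in the cells above:
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression

candidates = {
    "decision_tree": DecisionTreeClassifier(criterion="entropy", random_state=0),
    "random_forest": RandomForestClassifier(random_state=0),
    "svm": SVC(),
    "naive_bayes": GaussianNB(),
    "logistic_regression": LogisticRegression(max_iter=1000),
}
for name, model in candidates.items():
    scores = cross_val_score(model, X, y, cv=5, scoring="accuracy")
    print(f"{name}: mean accuracy {scores.mean():.3f} (+/- {scores.std():.3f})")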
[{"customer/Train.csv": {"column_names": "[\"ID\", \"Gender\", \"Ever_Married\", \"Age\", \"Graduated\", \"Profession\", \"Work_Experience\", \"Spending_Score\", \"Family_Size\", \"Var_1\", \"Segmentation\"]", "column_data_types": "{\"ID\": \"int64\", \"Gender\": \"object\", \"Ever_Married\": \"object\", \"Age\": \"int64\", \"Graduated\": \"object\", \"Profession\": \"object\", \"Work_Experience\": \"float64\", \"Spending_Score\": \"object\", \"Family_Size\": \"float64\", \"Var_1\": \"object\", \"Segmentation\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 8068 entries, 0 to 8067\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ID 8068 non-null int64 \n 1 Gender 8068 non-null object \n 2 Ever_Married 7928 non-null object \n 3 Age 8068 non-null int64 \n 4 Graduated 7990 non-null object \n 5 Profession 7944 non-null object \n 6 Work_Experience 7239 non-null float64\n 7 Spending_Score 8068 non-null object \n 8 Family_Size 7733 non-null float64\n 9 Var_1 7992 non-null object \n 10 Segmentation 8068 non-null object \ndtypes: float64(2), int64(2), object(7)\nmemory usage: 693.5+ KB\n", "summary": "{\"ID\": {\"count\": 8068.0, \"mean\": 463479.21455131384, \"std\": 2595.3812317546913, \"min\": 458982.0, \"25%\": 461240.75, \"50%\": 463472.5, \"75%\": 465744.25, \"max\": 467974.0}, \"Age\": {\"count\": 8068.0, \"mean\": 43.46690629647992, \"std\": 16.711696318721156, \"min\": 18.0, \"25%\": 30.0, \"50%\": 40.0, \"75%\": 53.0, \"max\": 89.0}, \"Work_Experience\": {\"count\": 7239.0, \"mean\": 2.641663213150988, \"std\": 3.406762985458083, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 4.0, \"max\": 14.0}, \"Family_Size\": {\"count\": 7733.0, \"mean\": 2.85012285012285, \"std\": 1.5314132820253756, \"min\": 1.0, \"25%\": 2.0, \"50%\": 3.0, \"75%\": 4.0, \"max\": 9.0}}", "examples": "{\"ID\":{\"0\":462809,\"1\":462643,\"2\":466315,\"3\":461735},\"Gender\":{\"0\":\"Male\",\"1\":\"Female\",\"2\":\"Female\",\"3\":\"Male\"},\"Ever_Married\":{\"0\":\"No\",\"1\":\"Yes\",\"2\":\"Yes\",\"3\":\"Yes\"},\"Age\":{\"0\":22,\"1\":38,\"2\":67,\"3\":67},\"Graduated\":{\"0\":\"No\",\"1\":\"Yes\",\"2\":\"Yes\",\"3\":\"Yes\"},\"Profession\":{\"0\":\"Healthcare\",\"1\":\"Engineer\",\"2\":\"Engineer\",\"3\":\"Lawyer\"},\"Work_Experience\":{\"0\":1.0,\"1\":null,\"2\":1.0,\"3\":0.0},\"Spending_Score\":{\"0\":\"Low\",\"1\":\"Average\",\"2\":\"Low\",\"3\":\"High\"},\"Family_Size\":{\"0\":4.0,\"1\":3.0,\"2\":1.0,\"3\":2.0},\"Var_1\":{\"0\":\"Cat_4\",\"1\":\"Cat_4\",\"2\":\"Cat_6\",\"3\":\"Cat_6\"},\"Segmentation\":{\"0\":\"D\",\"1\":\"A\",\"2\":\"B\",\"3\":\"B\"}}"}}, {"customer/Test.csv": {"column_names": "[\"ID\", \"Gender\", \"Ever_Married\", \"Age\", \"Graduated\", \"Profession\", \"Work_Experience\", \"Spending_Score\", \"Family_Size\", \"Var_1\"]", "column_data_types": "{\"ID\": \"int64\", \"Gender\": \"object\", \"Ever_Married\": \"object\", \"Age\": \"int64\", \"Graduated\": \"object\", \"Profession\": \"object\", \"Work_Experience\": \"float64\", \"Spending_Score\": \"object\", \"Family_Size\": \"float64\", \"Var_1\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2627 entries, 0 to 2626\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ID 2627 non-null int64 \n 1 Gender 2627 non-null object \n 2 Ever_Married 2577 non-null object \n 3 Age 2627 non-null int64 \n 4 Graduated 2603 non-null object \n 5 Profession 2589 non-null object \n 6 
Work_Experience 2358 non-null float64\n 7 Spending_Score 2627 non-null object \n 8 Family_Size 2514 non-null float64\n 9 Var_1 2595 non-null object \ndtypes: float64(2), int64(2), object(6)\nmemory usage: 205.4+ KB\n", "summary": "{\"ID\": {\"count\": 2627.0, \"mean\": 463433.91891891893, \"std\": 2618.245697750486, \"min\": 458989.0, \"25%\": 461162.5, \"50%\": 463379.0, \"75%\": 465696.0, \"max\": 467968.0}, \"Age\": {\"count\": 2627.0, \"mean\": 43.649790635706125, \"std\": 16.96701480933839, \"min\": 18.0, \"25%\": 30.0, \"50%\": 41.0, \"75%\": 53.0, \"max\": 89.0}, \"Work_Experience\": {\"count\": 2358.0, \"mean\": 2.552586938083121, \"std\": 3.3410941543687604, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 4.0, \"max\": 14.0}, \"Family_Size\": {\"count\": 2514.0, \"mean\": 2.8253778838504378, \"std\": 1.5519061217830905, \"min\": 1.0, \"25%\": 2.0, \"50%\": 2.0, \"75%\": 4.0, \"max\": 9.0}}", "examples": "{\"ID\":{\"0\":458989,\"1\":458994,\"2\":458996,\"3\":459000},\"Gender\":{\"0\":\"Female\",\"1\":\"Male\",\"2\":\"Female\",\"3\":\"Male\"},\"Ever_Married\":{\"0\":\"Yes\",\"1\":\"Yes\",\"2\":\"Yes\",\"3\":\"Yes\"},\"Age\":{\"0\":36,\"1\":37,\"2\":69,\"3\":59},\"Graduated\":{\"0\":\"Yes\",\"1\":\"Yes\",\"2\":\"No\",\"3\":\"No\"},\"Profession\":{\"0\":\"Engineer\",\"1\":\"Healthcare\",\"2\":null,\"3\":\"Executive\"},\"Work_Experience\":{\"0\":0.0,\"1\":8.0,\"2\":0.0,\"3\":11.0},\"Spending_Score\":{\"0\":\"Low\",\"1\":\"Average\",\"2\":\"Low\",\"3\":\"High\"},\"Family_Size\":{\"0\":1.0,\"1\":4.0,\"2\":1.0,\"3\":2.0},\"Var_1\":{\"0\":\"Cat_6\",\"1\":\"Cat_6\",\"2\":\"Cat_6\",\"3\":\"Cat_6\"}}"}}]
true
2
<start_data_description><data_path>customer/Train.csv: <column_names> ['ID', 'Gender', 'Ever_Married', 'Age', 'Graduated', 'Profession', 'Work_Experience', 'Spending_Score', 'Family_Size', 'Var_1', 'Segmentation'] <column_types> {'ID': 'int64', 'Gender': 'object', 'Ever_Married': 'object', 'Age': 'int64', 'Graduated': 'object', 'Profession': 'object', 'Work_Experience': 'float64', 'Spending_Score': 'object', 'Family_Size': 'float64', 'Var_1': 'object', 'Segmentation': 'object'} <dataframe_Summary> {'ID': {'count': 8068.0, 'mean': 463479.21455131384, 'std': 2595.3812317546913, 'min': 458982.0, '25%': 461240.75, '50%': 463472.5, '75%': 465744.25, 'max': 467974.0}, 'Age': {'count': 8068.0, 'mean': 43.46690629647992, 'std': 16.711696318721156, 'min': 18.0, '25%': 30.0, '50%': 40.0, '75%': 53.0, 'max': 89.0}, 'Work_Experience': {'count': 7239.0, 'mean': 2.641663213150988, 'std': 3.406762985458083, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 4.0, 'max': 14.0}, 'Family_Size': {'count': 7733.0, 'mean': 2.85012285012285, 'std': 1.5314132820253756, 'min': 1.0, '25%': 2.0, '50%': 3.0, '75%': 4.0, 'max': 9.0}} <dataframe_info> RangeIndex: 8068 entries, 0 to 8067 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 ID 8068 non-null int64 1 Gender 8068 non-null object 2 Ever_Married 7928 non-null object 3 Age 8068 non-null int64 4 Graduated 7990 non-null object 5 Profession 7944 non-null object 6 Work_Experience 7239 non-null float64 7 Spending_Score 8068 non-null object 8 Family_Size 7733 non-null float64 9 Var_1 7992 non-null object 10 Segmentation 8068 non-null object dtypes: float64(2), int64(2), object(7) memory usage: 693.5+ KB <some_examples> {'ID': {'0': 462809, '1': 462643, '2': 466315, '3': 461735}, 'Gender': {'0': 'Male', '1': 'Female', '2': 'Female', '3': 'Male'}, 'Ever_Married': {'0': 'No', '1': 'Yes', '2': 'Yes', '3': 'Yes'}, 'Age': {'0': 22, '1': 38, '2': 67, '3': 67}, 'Graduated': {'0': 'No', '1': 'Yes', '2': 'Yes', '3': 'Yes'}, 'Profession': {'0': 'Healthcare', '1': 'Engineer', '2': 'Engineer', '3': 'Lawyer'}, 'Work_Experience': {'0': 1.0, '1': None, '2': 1.0, '3': 0.0}, 'Spending_Score': {'0': 'Low', '1': 'Average', '2': 'Low', '3': 'High'}, 'Family_Size': {'0': 4.0, '1': 3.0, '2': 1.0, '3': 2.0}, 'Var_1': {'0': 'Cat_4', '1': 'Cat_4', '2': 'Cat_6', '3': 'Cat_6'}, 'Segmentation': {'0': 'D', '1': 'A', '2': 'B', '3': 'B'}} <end_description> <start_data_description><data_path>customer/Test.csv: <column_names> ['ID', 'Gender', 'Ever_Married', 'Age', 'Graduated', 'Profession', 'Work_Experience', 'Spending_Score', 'Family_Size', 'Var_1'] <column_types> {'ID': 'int64', 'Gender': 'object', 'Ever_Married': 'object', 'Age': 'int64', 'Graduated': 'object', 'Profession': 'object', 'Work_Experience': 'float64', 'Spending_Score': 'object', 'Family_Size': 'float64', 'Var_1': 'object'} <dataframe_Summary> {'ID': {'count': 2627.0, 'mean': 463433.91891891893, 'std': 2618.245697750486, 'min': 458989.0, '25%': 461162.5, '50%': 463379.0, '75%': 465696.0, 'max': 467968.0}, 'Age': {'count': 2627.0, 'mean': 43.649790635706125, 'std': 16.96701480933839, 'min': 18.0, '25%': 30.0, '50%': 41.0, '75%': 53.0, 'max': 89.0}, 'Work_Experience': {'count': 2358.0, 'mean': 2.552586938083121, 'std': 3.3410941543687604, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 4.0, 'max': 14.0}, 'Family_Size': {'count': 2514.0, 'mean': 2.8253778838504378, 'std': 1.5519061217830905, 'min': 1.0, '25%': 2.0, '50%': 2.0, '75%': 4.0, 'max': 9.0}} <dataframe_info> RangeIndex: 2627 entries, 0 to 2626 
Data columns (total 10 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 ID 2627 non-null int64 1 Gender 2627 non-null object 2 Ever_Married 2577 non-null object 3 Age 2627 non-null int64 4 Graduated 2603 non-null object 5 Profession 2589 non-null object 6 Work_Experience 2358 non-null float64 7 Spending_Score 2627 non-null object 8 Family_Size 2514 non-null float64 9 Var_1 2595 non-null object dtypes: float64(2), int64(2), object(6) memory usage: 205.4+ KB <some_examples> {'ID': {'0': 458989, '1': 458994, '2': 458996, '3': 459000}, 'Gender': {'0': 'Female', '1': 'Male', '2': 'Female', '3': 'Male'}, 'Ever_Married': {'0': 'Yes', '1': 'Yes', '2': 'Yes', '3': 'Yes'}, 'Age': {'0': 36, '1': 37, '2': 69, '3': 59}, 'Graduated': {'0': 'Yes', '1': 'Yes', '2': 'No', '3': 'No'}, 'Profession': {'0': 'Engineer', '1': 'Healthcare', '2': None, '3': 'Executive'}, 'Work_Experience': {'0': 0.0, '1': 8.0, '2': 0.0, '3': 11.0}, 'Spending_Score': {'0': 'Low', '1': 'Average', '2': 'Low', '3': 'High'}, 'Family_Size': {'0': 1.0, '1': 4.0, '2': 1.0, '3': 2.0}, 'Var_1': {'0': 'Cat_6', '1': 'Cat_6', '2': 'Cat_6', '3': 'Cat_6'}} <end_description>
1,255
0
2,853
1,255
129871076
from fasteda import fast_eda from openTSNE import TSNE import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") train["EJ"] = train["EJ"].map({"A": 0, "B": 1}).astype(float) y_tsne = train.dropna()["Class"].values train_tsne = train[train.select_dtypes(include=["float64"]).columns].values def scale_data(df): scaler = StandardScaler() float_cols = df.select_dtypes(include=["float64"]) scaled_data = scaler.fit_transform(df[list(float_cols)]) train[list(float_cols)] = scaled_data return df test.head(1).T # ### [fasteda](https://github.com/Matt-OP/fasteda) on the train set with target = "Class" fast_eda(train, target="Class") top_n_corr_features = 25 corr_matrix = train.corr(numeric_only=True).abs() corr_pairs = corr_matrix.unstack().sort_values(ascending=False) corr_pairs = corr_pairs[ corr_pairs.index.get_level_values(0) != corr_pairs.index.get_level_values(1) ] top_n_corr_pairs = corr_pairs[: top_n_corr_features * 2] duplicate_pairs = set() feature_tuple, correlation = [], [] for pair in top_n_corr_pairs.index: if (pair[0], pair[1]) not in duplicate_pairs and ( pair[1], pair[0], ) not in duplicate_pairs: feature_tuple.append((pair[0], pair[1])) correlation.append(round(top_n_corr_pairs[pair], 3)) duplicate_pairs.add((pair[0], pair[1])) # ### Visualizing the top 25 feature pairs with the highest correlation plt.style.use("dark_background") plt.figure(figsize=(10, 12)) ax = sns.barplot( x=correlation, y=[str(feats) for feats in feature_tuple], palette=sns.color_palette("Blues_r", n_colors=len(feature_tuple)), width=0.7, linewidth=1.2, edgecolor="#FFFFFF", ) for container in ax.containers: ax.bar_label(container, size=10, padding=5) plt.title(f"Top {top_n_corr_features} feature pairs with highest Pearson correlation") plt.xlim(0, 1.05) plt.xlabel("Pearson correlation") plt.ylabel("Feature pairs") plt.grid(False) plt.show() unique_counts = [np.unique(train[col]).size for col in train.columns[1:-1]] name_count_pairs = [ (col, unique_counts[i]) for i, col in enumerate(train.columns[1:-1]) ] sorted_pairs = sorted(name_count_pairs, key=lambda x: x[1], reverse=True) # ### Visualizing the number of unique values for each feature plt.style.use("dark_background") plt.figure(figsize=(10, 14)) ax = sns.barplot( x=[pair[1] for pair in sorted_pairs], y=[pair[0] for pair in sorted_pairs], palette=sns.color_palette("Reds_r", n_colors=len(sorted_pairs)), width=0.6, linewidth=1, edgecolor="#FFFFFF", ) for container in ax.containers: ax.bar_label(container, size=10, padding=5) plt.title(f"Unique counts for each feature | n rows in train = {len(train)}") plt.xlabel("Number of unique samples") plt.ylabel("Features") plt.grid(False) plt.show() # ### Value count plots of the features in greeks.csv colors = sns.color_palette("viridis") for i, col in enumerate(greeks.columns[1:-1]): plt.figure(figsize=(8, 6)) ax = ( greeks[col] .value_counts(ascending=True) .plot.barh(color=colors[i], edgecolor="#FFFFFF") ) for container in ax.containers: ax.bar_label(container, size=8, padding=3) plt.title(f"Value counts of {col} in greeks.csv") plt.grid(False) plt.show() train_2D = TSNE().fit(pd.DataFrame(train_tsne).dropna()) # ### TSNE 
visualization of the training data plt.style.use("dark_background") plt.figure(figsize=(12, 8)) ax = sns.scatterplot( x=train_2D[:, 0], y=train_2D[:, 1], s=12, hue=y_tsne, linewidth=0, palette=["#33cc33", "#ff0000"], ) plt.legend(title="Class") plt.grid(False) plt.title("TSNE plot | train data") plt.show() ss = StandardScaler() X_train_ss = pd.DataFrame(ss.fit_transform(train_tsne)).dropna() pca = PCA(2) X_train_ss2 = pca.fit_transform(X_train_ss) pca2 = pd.DataFrame( { "PCA1": X_train_ss2[:, 0], "PCA2": X_train_ss2[:, 1], "Class": train.dropna()["Class"], } ) # ### PCA visualization of the training data plt.style.use("dark_background") plt.figure(figsize=(12, 8)) ax = sns.scatterplot( data=pca2, x="PCA1", y="PCA2", s=10, hue="Class", linewidth=0, palette=["#33cc33", "#cc0000"], ) plt.grid(False) plt.title("Data scaled with StandardScaler()") sns.move_legend(ax, "upper left", bbox_to_anchor=(1, 1)) # plt.xlim(-4, 8) # plt.ylim(-4, 8) plt.show() train["is_train"] = 1 test["is_train"] = 0 train_test = pd.concat([train, test], ignore_index=True)
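# --- Added note and sketch (not part of the original notebook) ---
# scale_data above writes the scaled values into the module-level `train` frame instead of its
# `df` argument and returns `df` unchanged, so calling it on `test` would silently modify
# `train`. A corrected version of the same helper:
from sklearn.preprocessing import StandardScaler

def scale_data_fixed(df):
    scaler = StandardScaler()
    float_cols = list(df.select_dtypes(include=["float64"]).columns)
    out = df.copy()  # avoid mutating the caller's frame
    out[float_cols] = scaler.fit_transform(out[float_cols])
    return out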
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/871/129871076.ipynb
null
null
[{"Id": 129871076, "ScriptId": 38412154, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10590800, "CreationDate": "05/17/2023 05:13:18", "VersionNumber": 4.0, "Title": "ICR - Identifying Age-Related Conditions EDA \u2728", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 147.0, "LinesInsertedFromPrevious": 10.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 137.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 10}]
null
null
null
null
from fasteda import fast_eda from openTSNE import TSNE import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv") test = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv") greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv") train["EJ"] = train["EJ"].map({"A": 0, "B": 1}).astype(float) y_tsne = train.dropna()["Class"].values train_tsne = train[train.select_dtypes(include=["float64"]).columns].values def scale_data(df): scaler = StandardScaler() float_cols = df.select_dtypes(include=["float64"]) scaled_data = scaler.fit_transform(df[list(float_cols)]) train[list(float_cols)] = scaled_data return df test.head(1).T # ### [fasteda](https://github.com/Matt-OP/fasteda) on the train set with target = "Class" fast_eda(train, target="Class") top_n_corr_features = 25 corr_matrix = train.corr(numeric_only=True).abs() corr_pairs = corr_matrix.unstack().sort_values(ascending=False) corr_pairs = corr_pairs[ corr_pairs.index.get_level_values(0) != corr_pairs.index.get_level_values(1) ] top_n_corr_pairs = corr_pairs[: top_n_corr_features * 2] duplicate_pairs = set() feature_tuple, correlation = [], [] for pair in top_n_corr_pairs.index: if (pair[0], pair[1]) not in duplicate_pairs and ( pair[1], pair[0], ) not in duplicate_pairs: feature_tuple.append((pair[0], pair[1])) correlation.append(round(top_n_corr_pairs[pair], 3)) duplicate_pairs.add((pair[0], pair[1])) # ### Visualizing the top 25 feature pairs with the highest correlation plt.style.use("dark_background") plt.figure(figsize=(10, 12)) ax = sns.barplot( x=correlation, y=[str(feats) for feats in feature_tuple], palette=sns.color_palette("Blues_r", n_colors=len(feature_tuple)), width=0.7, linewidth=1.2, edgecolor="#FFFFFF", ) for container in ax.containers: ax.bar_label(container, size=10, padding=5) plt.title(f"Top {top_n_corr_features} feature pairs with highest Pearson correlation") plt.xlim(0, 1.05) plt.xlabel("Pearson correlation") plt.ylabel("Feature pairs") plt.grid(False) plt.show() unique_counts = [np.unique(train[col]).size for col in train.columns[1:-1]] name_count_pairs = [ (col, unique_counts[i]) for i, col in enumerate(train.columns[1:-1]) ] sorted_pairs = sorted(name_count_pairs, key=lambda x: x[1], reverse=True) # ### Visualizing the number of unique values for each feature plt.style.use("dark_background") plt.figure(figsize=(10, 14)) ax = sns.barplot( x=[pair[1] for pair in sorted_pairs], y=[pair[0] for pair in sorted_pairs], palette=sns.color_palette("Reds_r", n_colors=len(sorted_pairs)), width=0.6, linewidth=1, edgecolor="#FFFFFF", ) for container in ax.containers: ax.bar_label(container, size=10, padding=5) plt.title(f"Unique counts for each feature | n rows in train = {len(train)}") plt.xlabel("Number of unique samples") plt.ylabel("Features") plt.grid(False) plt.show() # ### Value count plots of the features in greeks.csv colors = sns.color_palette("viridis") for i, col in enumerate(greeks.columns[1:-1]): plt.figure(figsize=(8, 6)) ax = ( greeks[col] .value_counts(ascending=True) .plot.barh(color=colors[i], edgecolor="#FFFFFF") ) for container in ax.containers: ax.bar_label(container, size=8, padding=3) plt.title(f"Value counts of {col} in greeks.csv") plt.grid(False) plt.show() train_2D = TSNE().fit(pd.DataFrame(train_tsne).dropna()) # ### TSNE 
visualization of the training data plt.style.use("dark_background") plt.figure(figsize=(12, 8)) ax = sns.scatterplot( x=train_2D[:, 0], y=train_2D[:, 1], s=12, hue=y_tsne, linewidth=0, palette=["#33cc33", "#ff0000"], ) plt.legend(title="Class") plt.grid(False) plt.title("TSNE plot | train data") plt.show() ss = StandardScaler() X_train_ss = pd.DataFrame(ss.fit_transform(train_tsne)).dropna() pca = PCA(2) X_train_ss2 = pca.fit_transform(X_train_ss) pca2 = pd.DataFrame( { "PCA1": X_train_ss2[:, 0], "PCA2": X_train_ss2[:, 1], "Class": train.dropna()["Class"], } ) # ### PCA visualization of the training data plt.style.use("dark_background") plt.figure(figsize=(12, 8)) ax = sns.scatterplot( data=pca2, x="PCA1", y="PCA2", s=10, hue="Class", linewidth=0, palette=["#33cc33", "#cc0000"], ) plt.grid(False) plt.title("Data scaled with StandardScaler()") sns.move_legend(ax, "upper left", bbox_to_anchor=(1, 1)) # plt.xlim(-4, 8) # plt.ylim(-4, 8) plt.show() train["is_train"] = 1 test["is_train"] = 0 train_test = pd.concat([train, test], ignore_index=True)
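# --- Added sketch (not part of the original notebook) ---
# The 2-component PCA plot above does not show how much variance the projection keeps.
# Assuming the same standardised frame X_train_ss as above:
from sklearn.decomposition import PCA

pca_full = PCA().fit(X_train_ss)
print("variance kept by first 2 components:", pca_full.explained_variance_ratio_[:2].sum())
print("components needed for 90% of the variance:",
      int((pca_full.explained_variance_ratio_.cumsum() < 0.90).sum()) + 1)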
false
0
1,692
10
1,692
1,692
129871682
<jupyter_start><jupyter_text>Breast Cancer Dataset ### Description: Breast cancer is the most common cancer amongst women in the world. It accounts for 25% of all cancer cases, and affected over 2.1 Million people in 2015 alone. It starts when cells in the breast begin to grow out of control. These cells usually form tumors that can be seen via X-ray or felt as lumps in the breast area. The key challenges against it’s detection is how to classify tumors into malignant (cancerous) or benign(non cancerous). We ask you to complete the analysis of classifying these tumors using machine learning (with SVMs) and the Breast Cancer Wisconsin (Diagnostic) Dataset. Kaggle dataset identifier: breast-cancer-dataset <jupyter_code>import pandas as pd df = pd.read_csv('breast-cancer-dataset/breast-cancer.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 569 entries, 0 to 568 Data columns (total 32 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 569 non-null int64 1 diagnosis 569 non-null object 2 radius_mean 569 non-null float64 3 texture_mean 569 non-null float64 4 perimeter_mean 569 non-null float64 5 area_mean 569 non-null float64 6 smoothness_mean 569 non-null float64 7 compactness_mean 569 non-null float64 8 concavity_mean 569 non-null float64 9 concave points_mean 569 non-null float64 10 symmetry_mean 569 non-null float64 11 fractal_dimension_mean 569 non-null float64 12 radius_se 569 non-null float64 13 texture_se 569 non-null float64 14 perimeter_se 569 non-null float64 15 area_se 569 non-null float64 16 smoothness_se 569 non-null float64 17 compactness_se 569 non-null float64 18 concavity_se 569 non-null float64 19 concave points_se 569 non-null float64 20 symmetry_se 569 non-null float64 21 fractal_dimension_se 569 non-null float64 22 radius_worst 569 non-null float64 23 texture_worst 569 non-null float64 24 perimeter_worst 569 non-null float64 25 area_worst 569 non-null float64 26 smoothness_worst 569 non-null float64 27 compactness_worst 569 non-null float64 28 concavity_worst 569 non-null float64 29 concave points_worst 569 non-null float64 30 symmetry_worst 569 non-null float64 31 fractal_dimension_worst 569 non-null float64 dtypes: float64(30), int64(1), object(1) memory usage: 142.4+ KB <jupyter_text>Examples: { "id": 842302, "diagnosis": "M", "radius_mean": 17.99, "texture_mean": 10.38, "perimeter_mean": 122.8, "area_mean": 1001.0, "smoothness_mean": 0.1184, "compactness_mean": 0.2776, "concavity_mean": 0.30010000000000003, "concave points_mean": 0.1471, "symmetry_mean": 0.2419, "fractal_dimension_mean": 0.07871, "radius_se": 1.095, "texture_se": 0.9053, "perimeter_se": 8.589, "area_se": 153.4, "smoothness_se": 0.006398999999999999, "compactness_se": 0.04904000000000001, "concavity_se": 0.05373000000000001, "concave points_se": 0.015870000000000002, "...": "and 12 more columns" } { "id": 842517, "diagnosis": "M", "radius_mean": 20.57, "texture_mean": 17.77, "perimeter_mean": 132.9, "area_mean": 1326.0, "smoothness_mean": 0.08474000000000001, "compactness_mean": 0.07864, "concavity_mean": 0.0869, "concave points_mean": 0.07017000000000001, "symmetry_mean": 0.1812, "fractal_dimension_mean": 0.056670000000000005, "radius_se": 0.5435, "texture_se": 0.7339, "perimeter_se": 3.398, "area_se": 74.08, "smoothness_se": 0.005225, "compactness_se": 0.013080000000000001, "concavity_se": 0.018600000000000002, "concave points_se": 0.0134, "...": "and 12 more columns" } { "id": 84300903, "diagnosis": "M", "radius_mean": 19.69, "texture_mean": 
21.25, "perimeter_mean": 130.0, "area_mean": 1203.0, "smoothness_mean": 0.1096, "compactness_mean": 0.15990000000000001, "concavity_mean": 0.19740000000000002, "concave points_mean": 0.1279, "symmetry_mean": 0.2069, "fractal_dimension_mean": 0.05999, "radius_se": 0.7456, "texture_se": 0.7869, "perimeter_se": 4.585, "area_se": 94.03, "smoothness_se": 0.00615, "compactness_se": 0.040060000000000005, "concavity_se": 0.03832, "concave points_se": 0.02058, "...": "and 12 more columns" } { "id": 84348301, "diagnosis": "M", "radius_mean": 11.42, "texture_mean": 20.38, "perimeter_mean": 77.58, "area_mean": 386.1, "smoothness_mean": 0.14250000000000002, "compactness_mean": 0.28390000000000004, "concavity_mean": 0.2414, "concave points_mean": 0.1052, "symmetry_mean": 0.2597, "fractal_dimension_mean": 0.09744000000000001, "radius_se": 0.49560000000000004, "texture_se": 1.156, "perimeter_se": 3.445, "area_se": 27.23, "smoothness_se": 0.00911, "compactness_se": 0.07458000000000001, "concavity_se": 0.05661000000000001, "concave points_se": 0.018670000000000003, "...": "and 12 more columns" } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt data = pd.read_csv("/kaggle/input/breast-cancer-dataset/breast-cancer.csv") data.head() data.shape data.info() data.isnull().sum() data["diagnosis"].value_counts() # # EDA plt.figure(figsize=(6, 4)) plt.bar( data["diagnosis"].value_counts().keys(), data["diagnosis"].value_counts(), color=["green", "red"], ) plt.show() data.head() data.dtypes from sklearn.preprocessing import LabelEncoder encoder = LabelEncoder() data.iloc[:, 1] = encoder.fit_transform(data.iloc[:, 1].values) data.iloc[:, 1] sns.pairplot(data.iloc[:, 1:6], hue="diagnosis") data.iloc[:, 1:14].corr() plt.figure(figsize=(12, 8)) sns.heatmap(data.iloc[:, 1:13].corr(), annot=True, fmt=".0%") X = data.iloc[:, 2:31].values y = data.iloc[:, 1].values from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=0 ) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.fit_transform(X_test) from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr.fit(X_train, y_train) lr.score(X_train, y_train) from sklearn.tree import DecisionTreeClassifier dtc = DecisionTreeClassifier() dtc.fit(X_train, y_train) dtc.score(X_train, y_train) from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier() rfc.fit(X_train, y_train) rfc.score(X_train, y_train) from sklearn.metrics import confusion_matrix # cm = confusion_matrix(y_test,dtc.predict(X_test)) cm = confusion_matrix(y_test, rfc.predict(X_test)) cm print("accuracy=", (86 + 52) / (86 + 4 + 1 + 52)) prediction = rfc.predict(X_test) prediction y_test
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/871/129871682.ipynb
breast-cancer-dataset
yasserh
[{"Id": 129871682, "ScriptId": 38599976, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12476460, "CreationDate": "05/17/2023 05:20:23", "VersionNumber": 2.0, "Title": "Breast_Cancer_Analysis", "EvaluationDate": "05/17/2023", "IsChange": false, "TotalLines": 100.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 100.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 10}]
[{"Id": 186272656, "KernelVersionId": 129871682, "SourceDatasetVersionId": 2984728}]
[{"Id": 2984728, "DatasetId": 1829286, "DatasourceVersionId": 3032465, "CreatorUserId": 8833583, "LicenseName": "CC0: Public Domain", "CreationDate": "12/29/2021 19:07:20", "VersionNumber": 1.0, "Title": "Breast Cancer Dataset", "Slug": "breast-cancer-dataset", "Subtitle": "Binary Classification Prediction for type of Breast Cancer", "Description": "### Description:\n\nBreast cancer is the most common cancer amongst women in the world. It accounts for 25% of all cancer cases, and affected over 2.1 Million people in 2015 alone. It starts when cells in the breast begin to grow out of control. These cells usually form tumors that can be seen via X-ray or felt as lumps in the breast area.\n\nThe key challenges against it\u2019s detection is how to classify tumors into malignant (cancerous) or benign(non cancerous). We ask you to complete the analysis of classifying these tumors using machine learning (with SVMs) and the Breast Cancer Wisconsin (Diagnostic) Dataset.\n\n### Acknowledgements:\nThis dataset has been referred from Kaggle.\n\n### Objective:\n- Understand the Dataset & cleanup (if required).\n- Build classification models to predict whether the cancer type is Malignant or Benign.\n- Also fine-tune the hyperparameters & compare the evaluation metrics of various classification algorithms.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1829286, "CreatorUserId": 8833583, "OwnerUserId": 8833583.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2984728.0, "CurrentDatasourceVersionId": 3032465.0, "ForumId": 1852047, "Type": 2, "CreationDate": "12/29/2021 19:07:20", "LastActivityDate": "12/29/2021", "TotalViews": 170368, "TotalDownloads": 29194, "TotalVotes": 276, "TotalKernels": 138}]
[{"Id": 8833583, "UserName": "yasserh", "DisplayName": "M Yasser H", "RegisterDate": "11/09/2021", "PerformanceTier": 3}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt data = pd.read_csv("/kaggle/input/breast-cancer-dataset/breast-cancer.csv") data.head() data.shape data.info() data.isnull().sum() data["diagnosis"].value_counts() # # EDA plt.figure(figsize=(6, 4)) plt.bar( data["diagnosis"].value_counts().keys(), data["diagnosis"].value_counts(), color=["green", "red"], ) plt.show() data.head() data.dtypes from sklearn.preprocessing import LabelEncoder encoder = LabelEncoder() data.iloc[:, 1] = encoder.fit_transform(data.iloc[:, 1].values) data.iloc[:, 1] sns.pairplot(data.iloc[:, 1:6], hue="diagnosis") data.iloc[:, 1:14].corr() plt.figure(figsize=(12, 8)) sns.heatmap(data.iloc[:, 1:13].corr(), annot=True, fmt=".0%") X = data.iloc[:, 2:31].values y = data.iloc[:, 1].values from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=0 ) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.fit_transform(X_test) from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr.fit(X_train, y_train) lr.score(X_train, y_train) from sklearn.tree import DecisionTreeClassifier dtc = DecisionTreeClassifier() dtc.fit(X_train, y_train) dtc.score(X_train, y_train) from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier() rfc.fit(X_train, y_train) rfc.score(X_train, y_train) from sklearn.metrics import confusion_matrix # cm = confusion_matrix(y_test,dtc.predict(X_test)) cm = confusion_matrix(y_test, rfc.predict(X_test)) cm print("accuracy=", (86 + 52) / (86 + 4 + 1 + 52)) prediction = rfc.predict(X_test) prediction y_test
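# --- Added note and sketch (not part of the original notebook) ---
# `data` has 32 columns (id, diagnosis and 30 features), so data.iloc[:, 2:31] keeps only 29
# features and silently drops the last one (fractal_dimension_worst). If the intent is to use
# every feature, selecting by name is less fragile:
X_all = data.drop(columns=["id", "diagnosis"]).values  # all 30 feature columns
y_all = data["diagnosis"].values                       # already label-encoded above
print(X_all.shape)                                     # expected: (569, 30)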
[{"breast-cancer-dataset/breast-cancer.csv": {"column_names": "[\"id\", \"diagnosis\", \"radius_mean\", \"texture_mean\", \"perimeter_mean\", \"area_mean\", \"smoothness_mean\", \"compactness_mean\", \"concavity_mean\", \"concave points_mean\", \"symmetry_mean\", \"fractal_dimension_mean\", \"radius_se\", \"texture_se\", \"perimeter_se\", \"area_se\", \"smoothness_se\", \"compactness_se\", \"concavity_se\", \"concave points_se\", \"symmetry_se\", \"fractal_dimension_se\", \"radius_worst\", \"texture_worst\", \"perimeter_worst\", \"area_worst\", \"smoothness_worst\", \"compactness_worst\", \"concavity_worst\", \"concave points_worst\", \"symmetry_worst\", \"fractal_dimension_worst\"]", "column_data_types": "{\"id\": \"int64\", \"diagnosis\": \"object\", \"radius_mean\": \"float64\", \"texture_mean\": \"float64\", \"perimeter_mean\": \"float64\", \"area_mean\": \"float64\", \"smoothness_mean\": \"float64\", \"compactness_mean\": \"float64\", \"concavity_mean\": \"float64\", \"concave points_mean\": \"float64\", \"symmetry_mean\": \"float64\", \"fractal_dimension_mean\": \"float64\", \"radius_se\": \"float64\", \"texture_se\": \"float64\", \"perimeter_se\": \"float64\", \"area_se\": \"float64\", \"smoothness_se\": \"float64\", \"compactness_se\": \"float64\", \"concavity_se\": \"float64\", \"concave points_se\": \"float64\", \"symmetry_se\": \"float64\", \"fractal_dimension_se\": \"float64\", \"radius_worst\": \"float64\", \"texture_worst\": \"float64\", \"perimeter_worst\": \"float64\", \"area_worst\": \"float64\", \"smoothness_worst\": \"float64\", \"compactness_worst\": \"float64\", \"concavity_worst\": \"float64\", \"concave points_worst\": \"float64\", \"symmetry_worst\": \"float64\", \"fractal_dimension_worst\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 569 entries, 0 to 568\nData columns (total 32 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 569 non-null int64 \n 1 diagnosis 569 non-null object \n 2 radius_mean 569 non-null float64\n 3 texture_mean 569 non-null float64\n 4 perimeter_mean 569 non-null float64\n 5 area_mean 569 non-null float64\n 6 smoothness_mean 569 non-null float64\n 7 compactness_mean 569 non-null float64\n 8 concavity_mean 569 non-null float64\n 9 concave points_mean 569 non-null float64\n 10 symmetry_mean 569 non-null float64\n 11 fractal_dimension_mean 569 non-null float64\n 12 radius_se 569 non-null float64\n 13 texture_se 569 non-null float64\n 14 perimeter_se 569 non-null float64\n 15 area_se 569 non-null float64\n 16 smoothness_se 569 non-null float64\n 17 compactness_se 569 non-null float64\n 18 concavity_se 569 non-null float64\n 19 concave points_se 569 non-null float64\n 20 symmetry_se 569 non-null float64\n 21 fractal_dimension_se 569 non-null float64\n 22 radius_worst 569 non-null float64\n 23 texture_worst 569 non-null float64\n 24 perimeter_worst 569 non-null float64\n 25 area_worst 569 non-null float64\n 26 smoothness_worst 569 non-null float64\n 27 compactness_worst 569 non-null float64\n 28 concavity_worst 569 non-null float64\n 29 concave points_worst 569 non-null float64\n 30 symmetry_worst 569 non-null float64\n 31 fractal_dimension_worst 569 non-null float64\ndtypes: float64(30), int64(1), object(1)\nmemory usage: 142.4+ KB\n", "summary": "{\"id\": {\"count\": 569.0, \"mean\": 30371831.432337433, \"std\": 125020585.61222365, \"min\": 8670.0, \"25%\": 869218.0, \"50%\": 906024.0, \"75%\": 8813129.0, \"max\": 911320502.0}, \"radius_mean\": {\"count\": 569.0, \"mean\": 
14.127291739894552, \"std\": 3.5240488262120775, \"min\": 6.981, \"25%\": 11.7, \"50%\": 13.37, \"75%\": 15.78, \"max\": 28.11}, \"texture_mean\": {\"count\": 569.0, \"mean\": 19.289648506151142, \"std\": 4.301035768166949, \"min\": 9.71, \"25%\": 16.17, \"50%\": 18.84, \"75%\": 21.8, \"max\": 39.28}, \"perimeter_mean\": {\"count\": 569.0, \"mean\": 91.96903339191564, \"std\": 24.298981038754906, \"min\": 43.79, \"25%\": 75.17, \"50%\": 86.24, \"75%\": 104.1, \"max\": 188.5}, \"area_mean\": {\"count\": 569.0, \"mean\": 654.8891036906855, \"std\": 351.914129181653, \"min\": 143.5, \"25%\": 420.3, \"50%\": 551.1, \"75%\": 782.7, \"max\": 2501.0}, \"smoothness_mean\": {\"count\": 569.0, \"mean\": 0.0963602811950791, \"std\": 0.01406412813767362, \"min\": 0.05263, \"25%\": 0.08637, \"50%\": 0.09587, \"75%\": 0.1053, \"max\": 0.1634}, \"compactness_mean\": {\"count\": 569.0, \"mean\": 0.10434098418277679, \"std\": 0.052812757932512194, \"min\": 0.01938, \"25%\": 0.06492, \"50%\": 0.09263, \"75%\": 0.1304, \"max\": 0.3454}, \"concavity_mean\": {\"count\": 569.0, \"mean\": 0.0887993158172232, \"std\": 0.07971980870789348, \"min\": 0.0, \"25%\": 0.02956, \"50%\": 0.06154, \"75%\": 0.1307, \"max\": 0.4268}, \"concave points_mean\": {\"count\": 569.0, \"mean\": 0.04891914586994728, \"std\": 0.038802844859153605, \"min\": 0.0, \"25%\": 0.02031, \"50%\": 0.0335, \"75%\": 0.074, \"max\": 0.2012}, \"symmetry_mean\": {\"count\": 569.0, \"mean\": 0.18116186291739894, \"std\": 0.027414281336035715, \"min\": 0.106, \"25%\": 0.1619, \"50%\": 0.1792, \"75%\": 0.1957, \"max\": 0.304}, \"fractal_dimension_mean\": {\"count\": 569.0, \"mean\": 0.06279760984182776, \"std\": 0.007060362795084459, \"min\": 0.04996, \"25%\": 0.0577, \"50%\": 0.06154, \"75%\": 0.06612, \"max\": 0.09744}, \"radius_se\": {\"count\": 569.0, \"mean\": 0.40517205623901575, \"std\": 0.2773127329861039, \"min\": 0.1115, \"25%\": 0.2324, \"50%\": 0.3242, \"75%\": 0.4789, \"max\": 2.873}, \"texture_se\": {\"count\": 569.0, \"mean\": 1.2168534270650264, \"std\": 0.5516483926172023, \"min\": 0.3602, \"25%\": 0.8339, \"50%\": 1.108, \"75%\": 1.474, \"max\": 4.885}, \"perimeter_se\": {\"count\": 569.0, \"mean\": 2.8660592267135327, \"std\": 2.0218545540421076, \"min\": 0.757, \"25%\": 1.606, \"50%\": 2.287, \"75%\": 3.357, \"max\": 21.98}, \"area_se\": {\"count\": 569.0, \"mean\": 40.337079086116, \"std\": 45.49100551613181, \"min\": 6.802, \"25%\": 17.85, \"50%\": 24.53, \"75%\": 45.19, \"max\": 542.2}, \"smoothness_se\": {\"count\": 569.0, \"mean\": 0.007040978910369069, \"std\": 0.0030025179438390656, \"min\": 0.001713, \"25%\": 0.005169, \"50%\": 0.00638, \"75%\": 0.008146, \"max\": 0.03113}, \"compactness_se\": {\"count\": 569.0, \"mean\": 0.025478138840070295, \"std\": 0.017908179325677388, \"min\": 0.002252, \"25%\": 0.01308, \"50%\": 0.02045, \"75%\": 0.03245, \"max\": 0.1354}, \"concavity_se\": {\"count\": 569.0, \"mean\": 0.03189371634446397, \"std\": 0.03018606032298841, \"min\": 0.0, \"25%\": 0.01509, \"50%\": 0.02589, \"75%\": 0.04205, \"max\": 0.396}, \"concave points_se\": {\"count\": 569.0, \"mean\": 0.011796137082601054, \"std\": 0.006170285174046869, \"min\": 0.0, \"25%\": 0.007638, \"50%\": 0.01093, \"75%\": 0.01471, \"max\": 0.05279}, \"symmetry_se\": {\"count\": 569.0, \"mean\": 0.02054229876977153, \"std\": 0.008266371528798399, \"min\": 0.007882, \"25%\": 0.01516, \"50%\": 0.01873, \"75%\": 0.02348, \"max\": 0.07895}, \"fractal_dimension_se\": {\"count\": 569.0, \"mean\": 0.0037949038664323374, \"std\": 0.002646070967089195, 
\"min\": 0.0008948, \"25%\": 0.002248, \"50%\": 0.003187, \"75%\": 0.004558, \"max\": 0.02984}, \"radius_worst\": {\"count\": 569.0, \"mean\": 16.269189806678387, \"std\": 4.833241580469323, \"min\": 7.93, \"25%\": 13.01, \"50%\": 14.97, \"75%\": 18.79, \"max\": 36.04}, \"texture_worst\": {\"count\": 569.0, \"mean\": 25.677223198594024, \"std\": 6.146257623038319, \"min\": 12.02, \"25%\": 21.08, \"50%\": 25.41, \"75%\": 29.72, \"max\": 49.54}, \"perimeter_worst\": {\"count\": 569.0, \"mean\": 107.26121265377857, \"std\": 33.602542269036356, \"min\": 50.41, \"25%\": 84.11, \"50%\": 97.66, \"75%\": 125.4, \"max\": 251.2}, \"area_worst\": {\"count\": 569.0, \"mean\": 880.5831282952548, \"std\": 569.356992669949, \"min\": 185.2, \"25%\": 515.3, \"50%\": 686.5, \"75%\": 1084.0, \"max\": 4254.0}, \"smoothness_worst\": {\"count\": 569.0, \"mean\": 0.13236859402460457, \"std\": 0.022832429404835465, \"min\": 0.07117, \"25%\": 0.1166, \"50%\": 0.1313, \"75%\": 0.146, \"max\": 0.2226}, \"compactness_worst\": {\"count\": 569.0, \"mean\": 0.25426504393673116, \"std\": 0.157336488913742, \"min\": 0.02729, \"25%\": 0.1472, \"50%\": 0.2119, \"75%\": 0.3391, \"max\": 1.058}, \"concavity_worst\": {\"count\": 569.0, \"mean\": 0.27218848330404216, \"std\": 0.2086242806081323, \"min\": 0.0, \"25%\": 0.1145, \"50%\": 0.2267, \"75%\": 0.3829, \"max\": 1.252}, \"concave points_worst\": {\"count\": 569.0, \"mean\": 0.11460622319859401, \"std\": 0.06573234119594207, \"min\": 0.0, \"25%\": 0.06493, \"50%\": 0.09993, \"75%\": 0.1614, \"max\": 0.291}, \"symmetry_worst\": {\"count\": 569.0, \"mean\": 0.2900755711775044, \"std\": 0.061867467537518685, \"min\": 0.1565, \"25%\": 0.2504, \"50%\": 0.2822, \"75%\": 0.3179, \"max\": 0.6638}, \"fractal_dimension_worst\": {\"count\": 569.0, \"mean\": 0.0839458172231986, \"std\": 0.018061267348893986, \"min\": 0.05504, \"25%\": 0.07146, \"50%\": 0.08004, \"75%\": 0.09208, \"max\": 0.2075}}", "examples": "{\"id\":{\"0\":842302,\"1\":842517,\"2\":84300903,\"3\":84348301},\"diagnosis\":{\"0\":\"M\",\"1\":\"M\",\"2\":\"M\",\"3\":\"M\"},\"radius_mean\":{\"0\":17.99,\"1\":20.57,\"2\":19.69,\"3\":11.42},\"texture_mean\":{\"0\":10.38,\"1\":17.77,\"2\":21.25,\"3\":20.38},\"perimeter_mean\":{\"0\":122.8,\"1\":132.9,\"2\":130.0,\"3\":77.58},\"area_mean\":{\"0\":1001.0,\"1\":1326.0,\"2\":1203.0,\"3\":386.1},\"smoothness_mean\":{\"0\":0.1184,\"1\":0.08474,\"2\":0.1096,\"3\":0.1425},\"compactness_mean\":{\"0\":0.2776,\"1\":0.07864,\"2\":0.1599,\"3\":0.2839},\"concavity_mean\":{\"0\":0.3001,\"1\":0.0869,\"2\":0.1974,\"3\":0.2414},\"concave points_mean\":{\"0\":0.1471,\"1\":0.07017,\"2\":0.1279,\"3\":0.1052},\"symmetry_mean\":{\"0\":0.2419,\"1\":0.1812,\"2\":0.2069,\"3\":0.2597},\"fractal_dimension_mean\":{\"0\":0.07871,\"1\":0.05667,\"2\":0.05999,\"3\":0.09744},\"radius_se\":{\"0\":1.095,\"1\":0.5435,\"2\":0.7456,\"3\":0.4956},\"texture_se\":{\"0\":0.9053,\"1\":0.7339,\"2\":0.7869,\"3\":1.156},\"perimeter_se\":{\"0\":8.589,\"1\":3.398,\"2\":4.585,\"3\":3.445},\"area_se\":{\"0\":153.4,\"1\":74.08,\"2\":94.03,\"3\":27.23},\"smoothness_se\":{\"0\":0.006399,\"1\":0.005225,\"2\":0.00615,\"3\":0.00911},\"compactness_se\":{\"0\":0.04904,\"1\":0.01308,\"2\":0.04006,\"3\":0.07458},\"concavity_se\":{\"0\":0.05373,\"1\":0.0186,\"2\":0.03832,\"3\":0.05661},\"concave 
points_se\":{\"0\":0.01587,\"1\":0.0134,\"2\":0.02058,\"3\":0.01867},\"symmetry_se\":{\"0\":0.03003,\"1\":0.01389,\"2\":0.0225,\"3\":0.05963},\"fractal_dimension_se\":{\"0\":0.006193,\"1\":0.003532,\"2\":0.004571,\"3\":0.009208},\"radius_worst\":{\"0\":25.38,\"1\":24.99,\"2\":23.57,\"3\":14.91},\"texture_worst\":{\"0\":17.33,\"1\":23.41,\"2\":25.53,\"3\":26.5},\"perimeter_worst\":{\"0\":184.6,\"1\":158.8,\"2\":152.5,\"3\":98.87},\"area_worst\":{\"0\":2019.0,\"1\":1956.0,\"2\":1709.0,\"3\":567.7},\"smoothness_worst\":{\"0\":0.1622,\"1\":0.1238,\"2\":0.1444,\"3\":0.2098},\"compactness_worst\":{\"0\":0.6656,\"1\":0.1866,\"2\":0.4245,\"3\":0.8663},\"concavity_worst\":{\"0\":0.7119,\"1\":0.2416,\"2\":0.4504,\"3\":0.6869},\"concave points_worst\":{\"0\":0.2654,\"1\":0.186,\"2\":0.243,\"3\":0.2575},\"symmetry_worst\":{\"0\":0.4601,\"1\":0.275,\"2\":0.3613,\"3\":0.6638},\"fractal_dimension_worst\":{\"0\":0.1189,\"1\":0.08902,\"2\":0.08758,\"3\":0.173}}"}}]
true
1
<start_data_description><data_path>breast-cancer-dataset/breast-cancer.csv: <column_names> ['id', 'diagnosis', 'radius_mean', 'texture_mean', 'perimeter_mean', 'area_mean', 'smoothness_mean', 'compactness_mean', 'concavity_mean', 'concave points_mean', 'symmetry_mean', 'fractal_dimension_mean', 'radius_se', 'texture_se', 'perimeter_se', 'area_se', 'smoothness_se', 'compactness_se', 'concavity_se', 'concave points_se', 'symmetry_se', 'fractal_dimension_se', 'radius_worst', 'texture_worst', 'perimeter_worst', 'area_worst', 'smoothness_worst', 'compactness_worst', 'concavity_worst', 'concave points_worst', 'symmetry_worst', 'fractal_dimension_worst'] <column_types> {'id': 'int64', 'diagnosis': 'object', 'radius_mean': 'float64', 'texture_mean': 'float64', 'perimeter_mean': 'float64', 'area_mean': 'float64', 'smoothness_mean': 'float64', 'compactness_mean': 'float64', 'concavity_mean': 'float64', 'concave points_mean': 'float64', 'symmetry_mean': 'float64', 'fractal_dimension_mean': 'float64', 'radius_se': 'float64', 'texture_se': 'float64', 'perimeter_se': 'float64', 'area_se': 'float64', 'smoothness_se': 'float64', 'compactness_se': 'float64', 'concavity_se': 'float64', 'concave points_se': 'float64', 'symmetry_se': 'float64', 'fractal_dimension_se': 'float64', 'radius_worst': 'float64', 'texture_worst': 'float64', 'perimeter_worst': 'float64', 'area_worst': 'float64', 'smoothness_worst': 'float64', 'compactness_worst': 'float64', 'concavity_worst': 'float64', 'concave points_worst': 'float64', 'symmetry_worst': 'float64', 'fractal_dimension_worst': 'float64'} <dataframe_Summary> {'id': {'count': 569.0, 'mean': 30371831.432337433, 'std': 125020585.61222365, 'min': 8670.0, '25%': 869218.0, '50%': 906024.0, '75%': 8813129.0, 'max': 911320502.0}, 'radius_mean': {'count': 569.0, 'mean': 14.127291739894552, 'std': 3.5240488262120775, 'min': 6.981, '25%': 11.7, '50%': 13.37, '75%': 15.78, 'max': 28.11}, 'texture_mean': {'count': 569.0, 'mean': 19.289648506151142, 'std': 4.301035768166949, 'min': 9.71, '25%': 16.17, '50%': 18.84, '75%': 21.8, 'max': 39.28}, 'perimeter_mean': {'count': 569.0, 'mean': 91.96903339191564, 'std': 24.298981038754906, 'min': 43.79, '25%': 75.17, '50%': 86.24, '75%': 104.1, 'max': 188.5}, 'area_mean': {'count': 569.0, 'mean': 654.8891036906855, 'std': 351.914129181653, 'min': 143.5, '25%': 420.3, '50%': 551.1, '75%': 782.7, 'max': 2501.0}, 'smoothness_mean': {'count': 569.0, 'mean': 0.0963602811950791, 'std': 0.01406412813767362, 'min': 0.05263, '25%': 0.08637, '50%': 0.09587, '75%': 0.1053, 'max': 0.1634}, 'compactness_mean': {'count': 569.0, 'mean': 0.10434098418277679, 'std': 0.052812757932512194, 'min': 0.01938, '25%': 0.06492, '50%': 0.09263, '75%': 0.1304, 'max': 0.3454}, 'concavity_mean': {'count': 569.0, 'mean': 0.0887993158172232, 'std': 0.07971980870789348, 'min': 0.0, '25%': 0.02956, '50%': 0.06154, '75%': 0.1307, 'max': 0.4268}, 'concave points_mean': {'count': 569.0, 'mean': 0.04891914586994728, 'std': 0.038802844859153605, 'min': 0.0, '25%': 0.02031, '50%': 0.0335, '75%': 0.074, 'max': 0.2012}, 'symmetry_mean': {'count': 569.0, 'mean': 0.18116186291739894, 'std': 0.027414281336035715, 'min': 0.106, '25%': 0.1619, '50%': 0.1792, '75%': 0.1957, 'max': 0.304}, 'fractal_dimension_mean': {'count': 569.0, 'mean': 0.06279760984182776, 'std': 0.007060362795084459, 'min': 0.04996, '25%': 0.0577, '50%': 0.06154, '75%': 0.06612, 'max': 0.09744}, 'radius_se': {'count': 569.0, 'mean': 0.40517205623901575, 'std': 0.2773127329861039, 'min': 0.1115, '25%': 0.2324, '50%': 
0.3242, '75%': 0.4789, 'max': 2.873}, 'texture_se': {'count': 569.0, 'mean': 1.2168534270650264, 'std': 0.5516483926172023, 'min': 0.3602, '25%': 0.8339, '50%': 1.108, '75%': 1.474, 'max': 4.885}, 'perimeter_se': {'count': 569.0, 'mean': 2.8660592267135327, 'std': 2.0218545540421076, 'min': 0.757, '25%': 1.606, '50%': 2.287, '75%': 3.357, 'max': 21.98}, 'area_se': {'count': 569.0, 'mean': 40.337079086116, 'std': 45.49100551613181, 'min': 6.802, '25%': 17.85, '50%': 24.53, '75%': 45.19, 'max': 542.2}, 'smoothness_se': {'count': 569.0, 'mean': 0.007040978910369069, 'std': 0.0030025179438390656, 'min': 0.001713, '25%': 0.005169, '50%': 0.00638, '75%': 0.008146, 'max': 0.03113}, 'compactness_se': {'count': 569.0, 'mean': 0.025478138840070295, 'std': 0.017908179325677388, 'min': 0.002252, '25%': 0.01308, '50%': 0.02045, '75%': 0.03245, 'max': 0.1354}, 'concavity_se': {'count': 569.0, 'mean': 0.03189371634446397, 'std': 0.03018606032298841, 'min': 0.0, '25%': 0.01509, '50%': 0.02589, '75%': 0.04205, 'max': 0.396}, 'concave points_se': {'count': 569.0, 'mean': 0.011796137082601054, 'std': 0.006170285174046869, 'min': 0.0, '25%': 0.007638, '50%': 0.01093, '75%': 0.01471, 'max': 0.05279}, 'symmetry_se': {'count': 569.0, 'mean': 0.02054229876977153, 'std': 0.008266371528798399, 'min': 0.007882, '25%': 0.01516, '50%': 0.01873, '75%': 0.02348, 'max': 0.07895}, 'fractal_dimension_se': {'count': 569.0, 'mean': 0.0037949038664323374, 'std': 0.002646070967089195, 'min': 0.0008948, '25%': 0.002248, '50%': 0.003187, '75%': 0.004558, 'max': 0.02984}, 'radius_worst': {'count': 569.0, 'mean': 16.269189806678387, 'std': 4.833241580469323, 'min': 7.93, '25%': 13.01, '50%': 14.97, '75%': 18.79, 'max': 36.04}, 'texture_worst': {'count': 569.0, 'mean': 25.677223198594024, 'std': 6.146257623038319, 'min': 12.02, '25%': 21.08, '50%': 25.41, '75%': 29.72, 'max': 49.54}, 'perimeter_worst': {'count': 569.0, 'mean': 107.26121265377857, 'std': 33.602542269036356, 'min': 50.41, '25%': 84.11, '50%': 97.66, '75%': 125.4, 'max': 251.2}, 'area_worst': {'count': 569.0, 'mean': 880.5831282952548, 'std': 569.356992669949, 'min': 185.2, '25%': 515.3, '50%': 686.5, '75%': 1084.0, 'max': 4254.0}, 'smoothness_worst': {'count': 569.0, 'mean': 0.13236859402460457, 'std': 0.022832429404835465, 'min': 0.07117, '25%': 0.1166, '50%': 0.1313, '75%': 0.146, 'max': 0.2226}, 'compactness_worst': {'count': 569.0, 'mean': 0.25426504393673116, 'std': 0.157336488913742, 'min': 0.02729, '25%': 0.1472, '50%': 0.2119, '75%': 0.3391, 'max': 1.058}, 'concavity_worst': {'count': 569.0, 'mean': 0.27218848330404216, 'std': 0.2086242806081323, 'min': 0.0, '25%': 0.1145, '50%': 0.2267, '75%': 0.3829, 'max': 1.252}, 'concave points_worst': {'count': 569.0, 'mean': 0.11460622319859401, 'std': 0.06573234119594207, 'min': 0.0, '25%': 0.06493, '50%': 0.09993, '75%': 0.1614, 'max': 0.291}, 'symmetry_worst': {'count': 569.0, 'mean': 0.2900755711775044, 'std': 0.061867467537518685, 'min': 0.1565, '25%': 0.2504, '50%': 0.2822, '75%': 0.3179, 'max': 0.6638}, 'fractal_dimension_worst': {'count': 569.0, 'mean': 0.0839458172231986, 'std': 0.018061267348893986, 'min': 0.05504, '25%': 0.07146, '50%': 0.08004, '75%': 0.09208, 'max': 0.2075}} <dataframe_info> RangeIndex: 569 entries, 0 to 568 Data columns (total 32 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 569 non-null int64 1 diagnosis 569 non-null object 2 radius_mean 569 non-null float64 3 texture_mean 569 non-null float64 4 perimeter_mean 569 non-null float64 5 area_mean 569 non-null 
float64 6 smoothness_mean 569 non-null float64 7 compactness_mean 569 non-null float64 8 concavity_mean 569 non-null float64 9 concave points_mean 569 non-null float64 10 symmetry_mean 569 non-null float64 11 fractal_dimension_mean 569 non-null float64 12 radius_se 569 non-null float64 13 texture_se 569 non-null float64 14 perimeter_se 569 non-null float64 15 area_se 569 non-null float64 16 smoothness_se 569 non-null float64 17 compactness_se 569 non-null float64 18 concavity_se 569 non-null float64 19 concave points_se 569 non-null float64 20 symmetry_se 569 non-null float64 21 fractal_dimension_se 569 non-null float64 22 radius_worst 569 non-null float64 23 texture_worst 569 non-null float64 24 perimeter_worst 569 non-null float64 25 area_worst 569 non-null float64 26 smoothness_worst 569 non-null float64 27 compactness_worst 569 non-null float64 28 concavity_worst 569 non-null float64 29 concave points_worst 569 non-null float64 30 symmetry_worst 569 non-null float64 31 fractal_dimension_worst 569 non-null float64 dtypes: float64(30), int64(1), object(1) memory usage: 142.4+ KB <some_examples> {'id': {'0': 842302, '1': 842517, '2': 84300903, '3': 84348301}, 'diagnosis': {'0': 'M', '1': 'M', '2': 'M', '3': 'M'}, 'radius_mean': {'0': 17.99, '1': 20.57, '2': 19.69, '3': 11.42}, 'texture_mean': {'0': 10.38, '1': 17.77, '2': 21.25, '3': 20.38}, 'perimeter_mean': {'0': 122.8, '1': 132.9, '2': 130.0, '3': 77.58}, 'area_mean': {'0': 1001.0, '1': 1326.0, '2': 1203.0, '3': 386.1}, 'smoothness_mean': {'0': 0.1184, '1': 0.08474, '2': 0.1096, '3': 0.1425}, 'compactness_mean': {'0': 0.2776, '1': 0.07864, '2': 0.1599, '3': 0.2839}, 'concavity_mean': {'0': 0.3001, '1': 0.0869, '2': 0.1974, '3': 0.2414}, 'concave points_mean': {'0': 0.1471, '1': 0.07017, '2': 0.1279, '3': 0.1052}, 'symmetry_mean': {'0': 0.2419, '1': 0.1812, '2': 0.2069, '3': 0.2597}, 'fractal_dimension_mean': {'0': 0.07871, '1': 0.05667, '2': 0.05999, '3': 0.09744}, 'radius_se': {'0': 1.095, '1': 0.5435, '2': 0.7456, '3': 0.4956}, 'texture_se': {'0': 0.9053, '1': 0.7339, '2': 0.7869, '3': 1.156}, 'perimeter_se': {'0': 8.589, '1': 3.398, '2': 4.585, '3': 3.445}, 'area_se': {'0': 153.4, '1': 74.08, '2': 94.03, '3': 27.23}, 'smoothness_se': {'0': 0.006399, '1': 0.005225, '2': 0.00615, '3': 0.00911}, 'compactness_se': {'0': 0.04904, '1': 0.01308, '2': 0.04006, '3': 0.07458}, 'concavity_se': {'0': 0.05373, '1': 0.0186, '2': 0.03832, '3': 0.05661}, 'concave points_se': {'0': 0.01587, '1': 0.0134, '2': 0.02058, '3': 0.01867}, 'symmetry_se': {'0': 0.03003, '1': 0.01389, '2': 0.0225, '3': 0.05963}, 'fractal_dimension_se': {'0': 0.006193, '1': 0.003532, '2': 0.004571, '3': 0.009208}, 'radius_worst': {'0': 25.38, '1': 24.99, '2': 23.57, '3': 14.91}, 'texture_worst': {'0': 17.33, '1': 23.41, '2': 25.53, '3': 26.5}, 'perimeter_worst': {'0': 184.6, '1': 158.8, '2': 152.5, '3': 98.87}, 'area_worst': {'0': 2019.0, '1': 1956.0, '2': 1709.0, '3': 567.7}, 'smoothness_worst': {'0': 0.1622, '1': 0.1238, '2': 0.1444, '3': 0.2098}, 'compactness_worst': {'0': 0.6656, '1': 0.1866, '2': 0.4245, '3': 0.8663}, 'concavity_worst': {'0': 0.7119, '1': 0.2416, '2': 0.4504, '3': 0.6869}, 'concave points_worst': {'0': 0.2654, '1': 0.186, '2': 0.243, '3': 0.2575}, 'symmetry_worst': {'0': 0.4601, '1': 0.275, '2': 0.3613, '3': 0.6638}, 'fractal_dimension_worst': {'0': 0.1189, '1': 0.08902, '2': 0.08758, '3': 0.173}} <end_description>
813
10
3,215
813
129806854
<jupyter_start><jupyter_text>Finland_Cities_Database I have obtained this dataset from: https://simplemaps.com/data/fi-cities The website clearly states that you have complete freedom to utilize this dataset for personal or commercial applications. The dataset consists of a list of 357 prominent cities in Finland. Each entry in the dataset includes essential information such as latitude, longitude, region etc... Kaggle dataset identifier: finland-cities-database <jupyter_script># # Installation # This dataset is from :https://simplemaps.com/data/fi-cities # I have selected this dataset to demonstrate my skills in doing analytical work, despite not having previous work experience as geospatical analysis. The dataset consists of 357 cities in Finland, and it includes essential information such as latitude, longitude, region, and population for each city. With this dataset, I aim to demonstrate my analytical capabilities and proficiency in working with geospatial data. # Importing Libraries import geopandas as gpd import pandas as pd from shapely.geometry import Point, Polygon import folium data = pd.read_csv("/kaggle/input/finland-cities-database/fi.csv") data.columns data.info() # # load the dataset # display the first five rows of the dataset data.head(5) # create a geometry column data["coordinates"] = list(zip(data.lng, data.lat)) # Make geometry column Shapely objects data["coordinates"] = data["coordinates"].apply(Point) # Cast as GeoDataFrame gdf = gpd.GeoDataFrame(data, geometry="coordinates") # display Geodataframe gdf # Perform geospatial analysis world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres")) ax = world.plot(figsize=(12, 8)) data.plot(ax=ax, color="red", markersize=5) plt.title("Finland") plt.show() # ## five largest cities in Finland # # Sort the DataFrame by population in descending order df_sorted = data.sort_values("population", ascending=False) print("Five largest Largest Cities in Finland:") # Get the top 5 largest cities df_sorted.head(5) # Get the count of unique cities # city_counts = data['city'].value_counts() map1 = folium.Map(location=[61.9241, 25.7482], zoom_start=6, prefer_canavas=True) map1.add_child(folium.Marker(location=[60.1708, 24.9375], popup="Helsinki")) map1.add_child(folium.Marker(location=[61.4981, 23.7600], popup="Tampere")) map1.add_child(folium.Marker(location=[60.2056, 24.6556], popup="Espoo")) map1.add_child(folium.Marker(location=[60.4500, 22.2667], popup="Turku")) map1.add_child(folium.Marker(location=[60.2944, 25.0403], popup="Vantaa")) folium.PolyLine( locations=[ (60.1708, 24.9375), (61.4981, 23.7600), (60.2056, 24.6556), (60.4500, 22.2667), (60.2944, 25.0403), ], weight=5, color="pink", line_opacity=0.5, ).add_to(map1) map1
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/806/129806854.ipynb
finland-cities-database
phoonyein
[{"Id": 129806854, "ScriptId": 38594914, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6231579, "CreationDate": "05/16/2023 15:41:50", "VersionNumber": 3.0, "Title": "geospatical_data_analysis", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 85.0, "LinesInsertedFromPrevious": 5.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 80.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186179744, "KernelVersionId": 129806854, "SourceDatasetVersionId": 5698726}]
[{"Id": 5698726, "DatasetId": 3276489, "DatasourceVersionId": 5774378, "CreatorUserId": 6231579, "LicenseName": "Other (specified in description)", "CreationDate": "05/16/2023 13:14:28", "VersionNumber": 3.0, "Title": "Finland_Cities_Database", "Slug": "finland-cities-database", "Subtitle": "Comprehensive Information on 357 Prominent Cities of FINLAND", "Description": "I have obtained this dataset from: https://simplemaps.com/data/fi-cities \n\nThe website clearly states that you have complete freedom to utilize this dataset for personal or commercial applications. The dataset consists of a list of 357 prominent cities in Finland. Each entry in the dataset includes essential information such as latitude, longitude, region etc...", "VersionNotes": "Data Update 2023-05-16", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3276489, "CreatorUserId": 6231579, "OwnerUserId": 6231579.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5701214.0, "CurrentDatasourceVersionId": 5776893.0, "ForumId": 3342170, "Type": 2, "CreationDate": "05/16/2023 12:09:43", "LastActivityDate": "05/16/2023", "TotalViews": 75, "TotalDownloads": 4, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 6231579, "UserName": "phoonyein", "DisplayName": "Phoo Nyein", "RegisterDate": "11/24/2020", "PerformanceTier": 0}]
false
1
820
0
922
820
129806513
<jupyter_start><jupyter_text>General Knowledge QA This is a dataset of questions on general knowledge for children 4-7 years old and students up to grade 7. So you may use it to train, test or finetune you NLP model Some of the questions are based on images (located in folder `images`) # Content - **question** - question - **answer** - answer - **question_type** - level of question - **image** - an image for question if required Kaggle dataset identifier: general-knowledge-qa <jupyter_script># #### This is just a simple example of using GPT2 to solve QA task # ## Imports and preparations import pandas as pd import warnings warnings.filterwarnings("ignore") from transformers import GPT2LMHeadModel, GPT2Tokenizer import torch DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") # model_name_or_path = "sberbank-ai/rugpt3large_based_on_gpt2" model_name_or_path = "gpt2" tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path) model = GPT2LMHeadModel.from_pretrained(model_name_or_path).to(DEVICE) def answer(question): text = " Question: " + question + " Answer:" input_ids = tokenizer.encode(text, return_tensors="pt").to(DEVICE) out = model.generate(input_ids, do_sample=False) generated_text = list(map(tokenizer.decode, out))[0] print(generated_text) # ## Data df = pd.read_csv("/kaggle/input/general-knowledge-qa/general_knowledge_qa.csv") table = df.head() display(table) questions = table.question.tolist() # ## Asking the model these questions for question in questions: answer(question)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/806/129806513.ipynb
general-knowledge-qa
ilyaryabov
[{"Id": 129806513, "ScriptId": 38486836, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1419792, "CreationDate": "05/16/2023 15:38:38", "VersionNumber": 2.0, "Title": "GPT2 solving General Knowledge QA", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 42.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 41.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 186179336, "KernelVersionId": 129806513, "SourceDatasetVersionId": 5699707}]
[{"Id": 5699707, "DatasetId": 3277434, "DatasourceVersionId": 5775367, "CreatorUserId": 1419792, "LicenseName": "CC0: Public Domain", "CreationDate": "05/16/2023 15:29:53", "VersionNumber": 1.0, "Title": "General Knowledge QA", "Slug": "general-knowledge-qa", "Subtitle": "Dataset to train, test or finetune your NLP model", "Description": "This is a dataset of questions on general knowledge for children 4-7 years old and students up to grade 7. So you may use it to train, test or finetune you NLP model\n\nSome of the questions are based on images (located in folder `images`)\n\n# Content \n\n - **question** - question\n - **answer** - answer\n - **question_type** - level of question\n - **image** - an image for question if required", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 3277434, "CreatorUserId": 1419792, "OwnerUserId": 1419792.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5699707.0, "CurrentDatasourceVersionId": 5775367.0, "ForumId": 3343123, "Type": 2, "CreationDate": "05/16/2023 15:29:53", "LastActivityDate": "05/16/2023", "TotalViews": 480, "TotalDownloads": 77, "TotalVotes": 1, "TotalKernels": 1}]
[{"Id": 1419792, "UserName": "ilyaryabov", "DisplayName": "Right Goose", "RegisterDate": "11/15/2017", "PerformanceTier": 2}]
false
1
333
0
455
333
129806406
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")
train = pd.read_csv(r"/kaggle/input/playground-series-s3e15/data.csv")
train.head()
sub = pd.read_csv(r"/kaggle/input/playground-series-s3e15/sample_submission.csv")
sub.head()
test = train[train["x_e_out [-]"].isnull()]
test.drop("x_e_out [-]", axis=1, inplace=True)
test.head()
train_new = train[~train["x_e_out [-]"].isnull()]
train_new.head()
train_new.drop("id", axis=1, inplace=True)
test.drop("id", axis=1, inplace=True)
train_new.dtypes
train_new.isnull().sum()
train_new.nunique()
train_new["author"].replace(np.nan, "missing", inplace=True)
test["author"].replace(np.nan, "missing", inplace=True)
train_new["geometry"].replace(np.nan, "missing", inplace=True)
test["geometry"].replace(np.nan, "missing", inplace=True)
num_columns = np.where((train_new.dtypes == float).values.astype(int) == 1)[0]
cat_cols = np.where((train_new.dtypes == object).values.astype(int) == 1)[0]


def plot(data):
    fig, axes = plt.subplots(3, 3, figsize=(16, 14))
    for i, ax in zip(num_columns, axes.flatten()):
        sns.histplot(data, x=data.columns[i], kde=True, bins=30, ax=ax)
    fig.show()


plot(train_new)
from sklearn.impute import SimpleImputer

m = ["pressure [MPa]", "mass_flux [kg/m2-s]", "D_e [mm]", "D_h [mm]", "length [mm]"]
imputer = SimpleImputer(strategy="median")
train_new[m] = imputer.fit_transform(train_new[m])
test[m] = imputer.transform(test[m])
train_new.head()
X = train_new.drop("x_e_out [-]", axis=1)
y = train_new["x_e_out [-]"]
from catboost import CatBoostRegressor
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error

# Keep the splitter under its own name so it is not overwritten by the fold counter below
kf = KFold(n_splits=5, shuffle=True, random_state=42)
cat_model = CatBoostRegressor()
for fold, (train_idx, val_idx) in enumerate(kf.split(X, y)):
    X_train, y_train = X.iloc[train_idx, :], y.iloc[train_idx]
    X_test, y_test = X.iloc[val_idx, :], y.iloc[val_idx]  # validation fold for this split
    print("\n========FOLD", fold, "============")
    cat_model.fit(
        X_train,
        y_train,
        eval_set=[(X_test, y_test)],
        cat_features=cat_cols,
        early_stopping_rounds=200,
        verbose=100,
    )
    # Train model
    # Make predictions
    y_train_pred = cat_model.predict(X_train)  # for train
    y_test_pred = cat_model.predict(X_test)  # for val
    train_rmse = mean_squared_error(y_train, y_train_pred, squared=False)
    val_rmse = mean_squared_error(y_test, y_test_pred, squared=False)
    print("Model performance for Training set")
    print("- Root Mean Squared Error: {:.4f}".format(train_rmse))
    print("----------------------------------")
    print("Model performance for Val set")
    print("- Root Mean Squared Error: {:.4f}".format(val_rmse))
    print("=" * 35)
    print("\n")
feature_importance = cat_model.feature_importances_
sorted_idx = np.argsort(feature_importance)
fig = plt.figure(figsize=(12, 6))
plt.barh(range(len(sorted_idx)), feature_importance[sorted_idx], align="center")
plt.yticks(range(len(sorted_idx)), np.array(X_test.columns)[sorted_idx])
plt.title("Feature Importance")
plt.show()
sub["x_e_out [-]"] = cat_model.predict(test)
sub.to_csv("pred.csv", index=False)
sub.head()
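# Optional sketch (not in the original notebook): the loop above keeps only the model from the last
# fold. Averaging the test predictions of all folds is a common, slightly more robust alternative.
# It assumes the X, y, test, cat_cols and sub objects defined above.
kf_oof = KFold(n_splits=5, shuffle=True, random_state=42)
test_preds = np.zeros(len(test))
for tr_idx, val_idx in kf_oof.split(X, y):
    fold_model = CatBoostRegressor(verbose=0)
    fold_model.fit(
        X.iloc[tr_idx],
        y.iloc[tr_idx],
        eval_set=[(X.iloc[val_idx], y.iloc[val_idx])],
        cat_features=cat_cols,
        early_stopping_rounds=200,
    )
    # accumulate the average of the per-fold test predictions
    test_preds += fold_model.predict(test) / kf_oof.get_n_splits()
# sub["x_e_out [-]"] = test_preds  # would replace the single-model prediction above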
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/806/129806406.ipynb
null
null
[{"Id": 129806406, "ScriptId": 38595200, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4811132, "CreationDate": "05/16/2023 15:37:44", "VersionNumber": 1.0, "Title": "[Heat Flux] - catboost baseline", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 107.0, "LinesInsertedFromPrevious": 107.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 11}]
null
null
null
null
false
0
1,141
11
1,141
1,141
129806322
# ### Importing Libraries
from sklearn.datasets import make_classification  # generate classification datasets
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression

# ### Dataset Generation
X, y = make_classification(
    n_samples=100,
    n_features=2,
    n_informative=2,
    n_redundant=0,
    n_classes=2,
    n_clusters_per_class=1,
    random_state=41,
    hypercube=False,
    class_sep=10,
)
# hypercube - if True then the clusters are kept at the vertices of a hypercube (geometrical shape), if False the clusters are put on the vertices of a random polytope.
# class_sep - greater values spread out the clusters
X.shape
# ### Data Representation
plt.figure(figsize=(8, 5))
plt.scatter(X[:, 0], X[:, 1], c=y, s=100)  # c - colours, s - marker size
plt.show()
# ### Perceptron Trick Algorithm
epochs = 1000
lr = 0.1  # learning rate


def step(z):
    if z > 0:
        return 1
    return 0


def perceptron_step(X, y):
    X = np.insert(X, 0, 1, axis=1)  # Adding a col of 1s to X
    weights = np.ones(X.shape[1])
    for i in range(epochs):
        j = np.random.randint(0, 100)
        y_pred = step(np.dot(X[j], weights))
        weights = weights + (lr * (y[j] - y_pred)) * X[j]
    return weights[0], weights[1:]


def sigmoid(z):
    return 1 / (1 + np.exp(-z))


def perceptron_sigmoid(X, y):
    X = np.insert(X, 0, 1, axis=1)
    weights = np.ones(X.shape[1])
    for i in range(epochs):
        j = np.random.randint(0, 100)
        y_pred = sigmoid(np.dot(X[j], weights))
        weights = weights + (lr * (y[j] - y_pred)) * X[j]
    return weights[0], weights[1:]


intercept1, coefs1 = perceptron_step(X, y)
print(intercept1, " ", coefs1)
intercept2, coefs2 = perceptron_sigmoid(X, y)
print(intercept2, " ", coefs2)
# using the line equation ax + by + c = 0: slope m = -a/b, intercept = -c/b
# line using perceptron model (Step function)
m1 = -(coefs1[0] / coefs1[1])
b1 = -(intercept1 / coefs1[1])
# line using sigmoid model
m2 = -(coefs2[0] / coefs2[1])
b2 = -(intercept2 / coefs2[1])
classifier = LogisticRegression()
classifier.fit(X, y)
intercept3 = classifier.intercept_
coefs3 = classifier.coef_
print(intercept3, " ", coefs3)
# (x, y) values for plotting the lines (use x1_input / x2_input, not an undefined x_input)
x1_input = np.linspace(-3, 3, 100)
y1_input = (m1 * x1_input) + b1
x2_input = np.linspace(-3, 3, 100)
y2_input = (m2 * x2_input) + b2
# blue line - step function
# red line - sigmoid function
plt.figure(figsize=(8, 5))
plt.plot(x1_input, y1_input)
plt.plot(x2_input, y2_input, c="r")
plt.scatter(X[:, 0], X[:, 1], c=y, s=100)
plt.show()
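# Optional follow-up sketch (not in the original notebook): the logistic-regression coefficients
# computed above are never drawn, so this adds that decision boundary for comparison, using the
# same ax + by + c = 0 rearrangement as the two perceptron lines.
m3 = -(coefs3[0][0] / coefs3[0][1])
b3 = -(intercept3[0] / coefs3[0][1])
x3_input = np.linspace(-3, 3, 100)
y3_input = (m3 * x3_input) + b3

plt.figure(figsize=(8, 5))
plt.plot(x1_input, y1_input, label="perceptron (step)")
plt.plot(x2_input, y2_input, c="r", label="perceptron (sigmoid)")
plt.plot(x3_input, y3_input, c="g", label="logistic regression")
plt.scatter(X[:, 0], X[:, 1], c=y, s=100)
plt.legend()
plt.show()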
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/806/129806322.ipynb
null
null
[{"Id": 129806322, "ScriptId": 38508762, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6819371, "CreationDate": "05/16/2023 15:36:55", "VersionNumber": 2.0, "Title": "step-v/s-sigmoid-function-for-classification", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 93.0, "LinesInsertedFromPrevious": 47.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 46.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
970
0
970
970
129806046
# import libraries
import warnings

import numpy as np
import pandas as pd
import seaborn as sns

warnings.filterwarnings("ignore")
# # Feature Engineering
# ## Introduction
# In machine learning and deep learning, feature engineering has become increasingly important, and it is widely regarded as one of the most effective ways to improve model performance.
# This work is a summary of some popular feature engineering techniques, including:
# * Feature scaling
# * Feature transformation
# * Feature selection
# Note that in this work, the data from the Titanic competition will be used to illustrate the different feature engineering techniques.
# data overview
df = pd.read_csv(
    "/kaggle/input/titanic/train.csv",
    usecols=["Survived", "Age", "Fare"],
)
df.info()
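# Optional sketch (not part of the original text): a minimal illustration of the first technique
# listed above, feature scaling, on the Age and Fare columns loaded here. It assumes the df
# DataFrame defined above and drops rows with missing Age/Fare for simplicity.
from sklearn.preprocessing import StandardScaler, MinMaxScaler

cols = ["Age", "Fare"]
sample = df.dropna(subset=cols).copy()
# standardisation: zero mean, unit variance
sample[["Age_std", "Fare_std"]] = StandardScaler().fit_transform(sample[cols])
# min-max scaling: values squeezed into [0, 1]
sample[["Age_minmax", "Fare_minmax"]] = MinMaxScaler().fit_transform(sample[cols])
sample.describe().round(2)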
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/806/129806046.ipynb
null
null
[{"Id": 129806046, "ScriptId": 38601608, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12423408, "CreationDate": "05/16/2023 15:34:43", "VersionNumber": 6.0, "Title": "Feature Engineering", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 56.0, "LinesInsertedFromPrevious": 24.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 32.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
181
0
181
181
129992399
# Import standard operational packages. import numpy as np import pandas as pd # Important tools for modeling and evaluation. from sklearn.cluster import KMeans from sklearn.metrics import silhouette_score from sklearn.preprocessing import StandardScaler # Import visualization packages. import matplotlib.pyplot as plt import seaborn as sns # Save the `pandas` DataFrame in variable `penguins`. penguins = pd.read_csv("/kaggle/input/penguins/penguins.csv") # Review the first 10 rows. penguins.head(n=10) # Find out how many penguin types there are. penguins["species"].unique() # Find the count of each species type. penguins["species"].value_counts(dropna=False) # Check for missing values. penguins.isnull().sum() # Drop rows with missing values. # Save DataFrame in variable `penguins_subset`. penguins_subset = penguins.dropna(axis=0).reset_index(drop=True) # Check for missing values. penguins_subset.isna().sum() # View first 10 rows. penguins_subset.head(10) # Convert `sex` column from categorical to numeric. penguins_subset = pd.get_dummies(penguins_subset, drop_first=True, columns=["sex"]) # Drop the island column. penguins_subset = penguins_subset.drop(["island"], axis=1) # Exclude `species` variable from X X = penguins_subset.drop(["species"], axis=1) # Scale the features. # Assign the scaled data to variable `X_scaled`. X_scaled = StandardScaler().fit_transform(X) # Fit K-means and evaluate inertia for different values of k. num_clusters = [i for i in range(2, 11)] def kmeans_inertia(num_clusters, x_vals): """ Accepts as arguments list of ints and data array. Fits a KMeans model where k = each value in the list of ints. Returns each k-value's inertia appended to a list. """ inertia = [] for num in num_clusters: kms = KMeans(n_clusters=num, random_state=42) kms.fit(x_vals) inertia.append(kms.inertia_) return inertia # Return a list of inertia for k=2 to 10. inertia = kmeans_inertia(num_clusters, X_scaled) inertia # Create a line plot. plot = sns.lineplot(x=num_clusters, y=inertia, marker="o") plot.set_xlabel("Number of clusters") plot.set_ylabel("Inertia") # Evaluate silhouette score. # Write a function to return a list of each k-value's score. def kmeans_sil(num_clusters, x_vals): """ Accepts as arguments list of ints and data array. Fits a KMeans model where k = each value in the list of ints. Calculates a silhouette score for each k value. Returns each k-value's silhouette score appended to a list. """ sil_score = [] for num in num_clusters: kms = KMeans(n_clusters=num, random_state=42) kms.fit(x_vals) sil_score.append(silhouette_score(x_vals, kms.labels_)) return sil_score sil_score = kmeans_sil(num_clusters, X_scaled) sil_score # Create a line plot. plot = sns.lineplot(x=num_clusters, y=sil_score, marker="o") plot.set_xlabel("# of clusters") plot.set_ylabel("Silhouette Score") # Fit a 6-cluster model. kmeans6 = KMeans(n_clusters=6, random_state=42) kmeans6.fit(X_scaled) # Print unique labels. print("Unique labels:", np.unique(kmeans6.labels_)) # Create a new column `cluster`. penguins_subset["cluster"] = kmeans6.labels_ penguins_subset.head() # Verify if any `cluster` can be differentiated by `species`. penguins_subset.groupby(by=["cluster", "species"]).size() penguins_subset.groupby(by=["cluster", "species"]).size().plot.bar( title="Clusters differentiated by species", figsize=(6, 5), ylabel="Size", xlabel="(Cluster, Species)", ) # Verify if each `cluster` can be differentiated by `species` AND `sex_MALE`. 
penguins_subset.groupby(by=["cluster", "species", "sex_MALE"]).size().sort_values( ascending=False ) penguins_subset.groupby(by=["cluster", "species", "sex_MALE"]).size().unstack( level="species", fill_value=0 ).plot.bar( title="Clusters differentiated by species and sex", figsize=(6, 5), ylabel="Size", xlabel="(Cluster, Sex)", ) plt.legend(bbox_to_anchor=(1.3, 1.0))
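# Optional sketch (not in the original notebook): projecting the scaled features onto two principal
# components is a common way to eyeball the six clusters found above. It assumes the X_scaled array
# and the 'cluster' column created earlier.
from sklearn.decomposition import PCA

pca = PCA(n_components=2, random_state=42)
components = pca.fit_transform(X_scaled)
sns.scatterplot(
    x=components[:, 0],
    y=components[:, 1],
    hue=penguins_subset["cluster"],
    palette="tab10",
)
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.title("K-means clusters in PCA space")
plt.show()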
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/992/129992399.ipynb
null
null
[{"Id": 129992399, "ScriptId": 38670129, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9485427, "CreationDate": "05/18/2023 01:55:30", "VersionNumber": 1.0, "Title": "Building a K-Means Model with Python", "EvaluationDate": "05/18/2023", "IsChange": true, "TotalLines": 132.0, "LinesInsertedFromPrevious": 132.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,270
0
1,270
1,270
129262416
<jupyter_start><jupyter_text>MNIST as .jpg # Context The [Digit Recognizer](https://www.kaggle.com/c/digit-recognizer) competition uses the popular MNIST dataset to challenge Kagglers to classify digits correctly. In this dataset, the images are represented as strings of pixel values in `train.csv` and `test.csv`. Often, it is beneficial for image data to be in an image format rather than a string format. Therefore, I have converted the aforementioned datasets from text in .csv files to organized .jpg files. # Content This dataset is composed of four files: 1. `trainingSet.tar.gz` (10.2 MB) - This file contains ten sub folders labeled 0 to 9. Each of the sub folders contains .jpg images from the [Digit Recognizer](https://www.kaggle.com/c/digit-recognizer) competition's `train.csv` dataset, corresponding to the folder name (ie. folder 2 contains images of 2's, etc.). In total, there are 42,000 images in the training set. 2. `testSet.tar.gz` (6.8 MB) - This file contains the .jpg images from the [Digit Recognizer](https://www.kaggle.com/c/digit-recognizer) competition's `test.csv` dataset. In total, there are 28,000 images in the test set. 3. `trainingSample.zip` (407 KB) - This file contains ten sub folders labeled 0 to 9. Each sub folder contains 60 .jpg images from the training set, for a total of 600 images. 4. `testSample.zip` (233 KB) - This file contains a 350 image sample from the test set. # Acknowledgements As previously mentioned, all data presented here is simply a cleaned version of the data presented in Kaggle's [Digit Recognizer](https://www.kaggle.com/c/digit-recognizer) competition. The division of the MNIST dataset into training and test sets exactly mirrors that presented in the competition. # Inspiration I created this dataset when exploring TensorFlow's Inception model. Inception is a massive CNN built by Google to compete in the ImageNet competition. By way of Transfer Learning, the final layer of Inception can be retrained, rendering the model useful for general classification tasks. In retraining the model, .jpg images must be used, thereby necessitating to the creation of this dataset. My hope in experimenting with Inception was to achieve an accuracy of around 98.5% or higher on the MNIST dataset. Unfortunately, the maximum accuracy I reached with Inception was only 95.314%. If you are interested in my code for said attempt, it is available on my GitHub repository [Kaggle MNIST Inception CNN](https://github.com/scoliann/Kaggle-MNIST-Inception-CNN). To learn more about retraining Inception, check out [TensorFlow for Poets](https://codelabs.developers.google.com/codelabs/tensorflow-for-poets/index.html?index=..%2F..%2Findex#0). Kaggle dataset identifier: mnistasjpg <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) import matplotlib.pyplot as plt import torch import torch.nn as nn import torch.nn.functional as F from torchvision.datasets import MNIST import torch import torch.nn as nn from torch.utils.data import DataLoader from torchvision import transforms, datasets from tqdm import tqdm # from torch.utils.tensorboard import SummaryWriter # writer = SummaryWriter() # Define device device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Set your parameters n_epochs = 200 z_dim = 100 display_step = 500 batch_size = 128 learning_rate = 0.00001 # Load MNIST dataset train_set = datasets.MNIST( root="./data", train=True, download=True, transform=transforms.ToTensor() ) train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True) class Generator(nn.Module): def __init__(self, zdim=100, image_dim=1): super(Generator, self).__init__() self.linear = nn.Linear(zdim, 3136) self.batchnorm_1 = nn.BatchNorm1d(3136) self.activation_1 = nn.ReLU() self.upsample_1 = nn.Upsample(scale_factor=2, mode="bilinear") self.conv_block_1 = self._generator_block(64, 128) self.upsample_2 = nn.Upsample(scale_factor=2, mode="bilinear") self.conv_block_2 = self._generator_block(128, 64) self.conv_block_3 = self._generator_block(64, 64) self.output = nn.Conv2d(64, image_dim, kernel_size=1) self.activation_2 = nn.ReLU() def _generator_block(self, input_dim, output_dim): return nn.Sequential( nn.Conv2d(input_dim, output_dim, kernel_size=1), nn.BatchNorm2d(output_dim), nn.ReLU(inplace=True), ) def forward(self, x): x = self.linear(x) x = self.batchnorm_1(x) x = self.activation_1(x) x = torch.reshape(x, (-1, 64, 7, 7)) x = self.upsample_1(x) x = self.conv_block_1(x) x = self.upsample_2(x) x = self.conv_block_2(x) x = self.conv_block_3(x) x = self.output(x) x = self.activation_2(x) return x z_vect = torch.randn((128, 100)) out = Generator()(z_vect) plt.imshow(out[0].permute(2, 1, 0).detach().numpy()) plt.show() class Discriminator(nn.Module): def __init__(self): super(Discriminator, self).__init__() self.conv_block_1 = self._discriminator_blcok(1, 64, 2) self.conv_block_2 = self._discriminator_blcok(64, 64, 2) self.conv_block_3 = self._discriminator_blcok(64, 128, 2) self.conv_block_4 = self._discriminator_blcok(128, 128, 1) self.flatten = nn.Flatten() self.output = nn.Linear(2048, 1) def _discriminator_blcok(self, in_channels, out_channels, stride): return nn.Sequential( nn.Conv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1 ), nn.ReLU(), nn.Dropout2d(0.4), ) def forward(self, x): x = self.conv_block_1(x) x = self.conv_block_2(x) x = self.conv_block_3(x) x = self.conv_block_4(x) x = self.flatten(x) x = self.output(x) x = F.sigmoid(x) return x Discriminator()(out) # Load Model gen = Generator(z_dim).to(device) gen_opt = torch.optim.Adam(gen.parameters(), lr=learning_rate) disc = Discriminator().to(device) disc_opt = torch.optim.Adam(disc.parameters(), lr=learning_rate) criterion = nn.BCEWithLogitsLoss().to(device) def get_noise(batch_size, zdim, device=device): return torch.randn((batch_size, zdim)).to(device) def gen_loss(batch_size, zdim, device, gen): noise = get_noise(batch_size, zdim, device=device) fake_2 = gen(noise) disc_fake_pred = disc(fake_2) gen_loss = criterion(disc_fake_pred, torch.ones_like(disc_fake_pred)) return gen_loss def disc_loss(batch_size, zdim, device): fake_noise = get_noise(batch_size, zdim, device=device) fake = gen(fake_noise) disc_fake_pred = disc(fake.detach()) disc_fake_loss = criterion(disc_fake_pred, torch.zeros_like(disc_fake_pred)) 
disc_real_pred = disc(real) disc_real_loss = criterion(disc_real_pred, torch.ones_like(disc_real_pred)) disc_loss = (disc_fake_loss + disc_real_loss) / 2 return disc_loss # Train models cur_step = 0 mean_generator_loss = 0 mean_discriminator_loss = 0 visualization = False for epoch in range(n_epochs): for real, _ in tqdm(train_loader): cur_batch_size = real.size(0) real = real.to(device) ## Update discriminator ## disc_opt.zero_grad() fake_noise = get_noise(cur_batch_size, z_dim, device=device) fake = gen(fake_noise) disc_fake_pred = disc(fake.detach()) disc_fake_loss = criterion(disc_fake_pred, torch.zeros_like(disc_fake_pred)) disc_real_pred = disc(real) disc_real_loss = criterion(disc_real_pred, torch.ones_like(disc_real_pred)) disc_loss = (disc_fake_loss + disc_real_loss) / 2 # Keep track of the average discriminator loss mean_discriminator_loss += disc_loss.item() / display_step # Update gradients disc_loss.backward(retain_graph=True) # Update optimizer disc_opt.step() ## Update generator ## gen_opt.zero_grad() fake_noise_2 = get_noise(cur_batch_size, z_dim, device=device) fake_2 = gen(fake_noise_2) disc_fake_pred = disc(fake_2) gen_loss = criterion(disc_fake_pred, torch.ones_like(disc_fake_pred)) gen_loss.backward() gen_opt.step() # Keep track of the average generator loss mean_generator_loss += gen_loss.item() / display_step ## Visualization code ## if cur_step % display_step == 0 and cur_step > 0: print( f"Epoch {epoch + 1}, Step {cur_step}: \nGenerator loss: {mean_generator_loss} | Discriminator loss: {mean_discriminator_loss}\n" ) mean_generator_loss = 0 mean_discriminator_loss = 0 cur_step += 1
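# -----------------------------------------------------------------------------
# Editor's sketch (not part of the original notebook): the Discriminator above
# ends its forward pass with F.sigmoid(x) while training uses
# nn.BCEWithLogitsLoss, which applies a sigmoid internally, so the sigmoid is
# effectively applied twice; the standalone disc_loss helper also reads `real`,
# which is not among its parameters, and the Generator's final ReLU leaves
# pixel values unbounded above. The hedged correction below assumes the final
# sigmoid is removed from Discriminator.forward so disc(...) returns raw
# logits; the names disc_loss_fixed / gen_loss_fixed are illustrative.
import torch
import torch.nn as nn

bce_logits = nn.BCEWithLogitsLoss()


def disc_loss_fixed(gen, disc, real, batch_size, zdim, device):
    # Discriminator loss on raw logits: fake batch labelled 0, real batch labelled 1.
    noise = torch.randn((batch_size, zdim), device=device)
    fake = gen(noise)
    fake_logits = disc(fake.detach())
    real_logits = disc(real)
    fake_loss = bce_logits(fake_logits, torch.zeros_like(fake_logits))
    real_loss = bce_logits(real_logits, torch.ones_like(real_logits))
    return (fake_loss + real_loss) / 2


def gen_loss_fixed(gen, disc, batch_size, zdim, device):
    # Generator loss: push the discriminator's logits on generated images toward "real".
    noise = torch.randn((batch_size, zdim), device=device)
    fake_logits = disc(gen(noise))
    return bce_logits(fake_logits, torch.ones_like(fake_logits))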
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/262/129262416.ipynb
mnistasjpg
scolianni
[{"Id": 129262416, "ScriptId": 38427472, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6444558, "CreationDate": "05/12/2023 09:15:10", "VersionNumber": 1.0, "Title": "Workshop-GAN2", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 216.0, "LinesInsertedFromPrevious": 216.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 185150596, "KernelVersionId": 129262416, "SourceDatasetVersionId": 2280}]
[{"Id": 2280, "DatasetId": 1272, "DatasourceVersionId": 2280, "CreatorUserId": 289999, "LicenseName": "CC0: Public Domain", "CreationDate": "05/15/2017 09:10:04", "VersionNumber": 1.0, "Title": "MNIST as .jpg", "Slug": "mnistasjpg", "Subtitle": "Kaggle Digit Recognizer Competition Dataset as .jpg Image Files", "Description": "# Context \n\nThe [Digit Recognizer](https://www.kaggle.com/c/digit-recognizer) competition uses the popular MNIST dataset to challenge Kagglers to classify digits correctly. In this dataset, the images are represented as strings of pixel values in `train.csv` and `test.csv`. Often, it is beneficial for image data to be in an image format rather than a string format. Therefore, I have converted the aforementioned datasets from text in .csv files to organized .jpg files.\n\n# Content\n\nThis dataset is composed of four files:\n\n1. `trainingSet.tar.gz` (10.2 MB) - This file contains ten sub folders labeled 0 to 9. Each of the sub folders contains .jpg images from the [Digit Recognizer](https://www.kaggle.com/c/digit-recognizer) competition's `train.csv` dataset, corresponding to the folder name (ie. folder 2 contains images of 2's, etc.). In total, there are 42,000 images in the training set.\n2. `testSet.tar.gz` (6.8 MB) - This file contains the .jpg images from the [Digit Recognizer](https://www.kaggle.com/c/digit-recognizer) competition's `test.csv` dataset. In total, there are 28,000 images in the test set.\n3. `trainingSample.zip` (407 KB) - This file contains ten sub folders labeled 0 to 9. Each sub folder contains 60 .jpg images from the training set, for a total of 600 images.\n4. `testSample.zip` (233 KB) - This file contains a 350 image sample from the test set.\n\n# Acknowledgements\n\nAs previously mentioned, all data presented here is simply a cleaned version of the data presented in Kaggle's [Digit Recognizer](https://www.kaggle.com/c/digit-recognizer) competition. The division of the MNIST dataset into training and test sets exactly mirrors that presented in the competition.\n\n# Inspiration\n\nI created this dataset when exploring TensorFlow's Inception model. Inception is a massive CNN built by Google to compete in the ImageNet competition. By way of Transfer Learning, the final layer of Inception can be retrained, rendering the model useful for general classification tasks. In retraining the model, .jpg images must be used, thereby necessitating to the creation of this dataset.\n\nMy hope in experimenting with Inception was to achieve an accuracy of around 98.5% or higher on the MNIST dataset. Unfortunately, the maximum accuracy I reached with Inception was only 95.314%. If you are interested in my code for said attempt, it is available on my GitHub repository [Kaggle MNIST Inception CNN](https://github.com/scoliann/Kaggle-MNIST-Inception-CNN).\n\nTo learn more about retraining Inception, check out [TensorFlow for Poets](https://codelabs.developers.google.com/codelabs/tensorflow-for-poets/index.html?index=..%2F..%2Findex#0).", "VersionNotes": "Initial release", "TotalCompressedBytes": 18413932.0, "TotalUncompressedBytes": 18413932.0}]
[{"Id": 1272, "CreatorUserId": 289999, "OwnerUserId": 289999.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2280.0, "CurrentDatasourceVersionId": 2280.0, "ForumId": 3500, "Type": 2, "CreationDate": "05/15/2017 09:10:04", "LastActivityDate": "02/05/2018", "TotalViews": 149486, "TotalDownloads": 38069, "TotalVotes": 315, "TotalKernels": 47}]
[{"Id": 289999, "UserName": "scolianni", "DisplayName": "Stuart Colianni", "RegisterDate": "02/04/2015", "PerformanceTier": 1}]
false
0
1,977
0
2,741
1,977
129381354
import pandas as pd import numpy as np import seaborn as sns import nltk import matplotlib.pyplot as plt from collections import Counter from bs4 import BeautifulSoup import wordcloud from wordcloud import WordCloud import string import spacy import re from nltk.tokenize import RegexpTokenizer from nltk.corpus import stopwords from nltk.tokenize import WordPunctTokenizer from nltk.stem import WordNetLemmatizer from nltk.stem import PorterStemmer from nltk.stem.snowball import SnowballStemmer from sklearn.preprocessing import LabelEncoder from sklearn import manifold, decomposition from sklearn import cluster, metrics from textblob import TextBlob from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.metrics import accuracy_score from sklearn.feature_extraction.text import TfidfVectorizer import statsmodels.formula.api as smf import statsmodels.api as sm import time import random import gensim.corpora as corpora import gensim from pprint import pprint from gensim.models import CoherenceModel from sklearn.metrics import ( accuracy_score, confusion_matrix, ConfusionMatrixDisplay, f1_score, ) # get dataset df = pd.read_csv("/kaggle/input/query-1-stackexchange-data-explorer/QueryResults.csv") df.shape # ### initial lookup df.sample(5) # title and body both have text that can be used to generate a tag : MAYBE TO COMBINE TITLE AND BODY TEXTS? # body and tags are weirdly formated and necessicate cleaning df.corr() # viewcount and answercount are correlated to score # ### Structure df.info() # zero non null / 5 numerical columns for correlation (-1 because Id) df.nunique() # some tags are totally similar / favorite count has only 2 type of values df.dtypes.value_counts() # # df.isnull().sum() # target has no missing value df.duplicated("Body").sum() # all texts are unique df.describe(include=np.number) df["stats"] = df.AnswerCount / df.ViewCount df.stats.sort_values(ascending=False)[1000:].plot.hist(bins=100) plt.title("Average answers per viewcount ") print("In average there are 4 answers per post and 1 answer per ~10 000 views ") # # Tags analysis # Here we create the variable 'tag_unique' that takes the first tag of the list of 5 'Tags' for each post df.Tags = df.Tags.apply( lambda x: x.replace("><", " ").replace("<", "").replace(">", "") ) df["tags_token"] = df.Tags.apply(lambda x: x.split()) corpus_tag = [ tag for tag in df.tags_token for tag in tag ] # create corpus of all the tags df["tag_unique"] = df.tags_token.apply(lambda x: x[0]) corpus_tag_unique = [tag for tag in df.tag_unique] def cum_sum(list_words): df = pd.Series(list_words).value_counts() df["cum_sum"] = 100 * df.cumsum() / df.sum() return df.cum_sum fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 4)) cum_sum(corpus_tag)[:25].plot.barh(ax=ax0) ax0.set_title("With All tags") cum_sum(corpus_tag_unique)[:25].plot.barh(ax=ax1) ax1.set_title("Only most used tag per post") fig.subplots_adjust(wspace=0.4) fig.suptitle("Cumulated sum of keywords usage") tags_to_keep = list( cum_sum(corpus_tag_unique)[:10].index ) # TARGETS, WE KEEP 10 of THEM print(f"We keep only the post which Tag_unique is in the top 10 list") print( f"This removes {round((100* (len(df.loc[~(df.tag_unique.isin(tags_to_keep))]))/ len(df)),2)} % of the dataset" ) print(f"the tags taken are", tags_to_keep) df = df.loc[df.tag_unique.isin(tags_to_keep)] # plt.figure() # plt.title("repartition des tags identifié come target") # 
df.tag_unique.value_counts().plot.pie() # we assign a number to the target in order to use the classificatio algorythms df["target_num"] = LabelEncoder().fit_transform(df.tag_unique) # # ViewCounts fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 4)) fig.subplots_adjust(wspace=0.4) fig.suptitle("Distribution of log10(ViewCount)") np.log10(df.ViewCount).plot.hist(ax=ax0, bins=1000) np.log10(df.ViewCount).plot.box(ax=ax1) q1, q3 = np.quantile(np.log10(df.ViewCount), (0.25, 0.75)) flyer_bottom = q1 - 1.5 * (q3 - q1) flyer_top = q3 + 1.5 * (q3 - q1) print(f"We keep only the post which viewcount is above lower moustache") print( f"This removes {round((100* (len(df.loc[(df.ViewCount <= 10**(flyer_bottom))]))/ len(df)),2)} % of the dataset" ) df = df.loc[(df.ViewCount > 10 ** (flyer_bottom))] # &(df.ViewCount < 10**(flyer_top))] # ## Body # Here we use the package bs4 to process the html formatted Body texts # we use BeautifulSoup to parse the html formated text df.Body = df.Body.apply( lambda x: BeautifulSoup(x, "html.parser").get_text().replace("\n", " ") ) df["number_words"] = df.Body.apply(lambda x: len([word for word in x.split()])) print(" most of the message contains", np.max(df.number_words.value_counts()), "words") fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 4)) fig.subplots_adjust(wspace=0.4) fig.suptitle("Distribution of numbers of words per post)") df.number_words.sort_values()[:-200].plot.hist(ax=ax0, bins=100) df.number_words.plot.box(ax=ax1, showfliers=False, showmeans=True) start_time = time.time() raw_corpus = "".join(df.Body.values) # makes a string with all the texts print("the caracters present in the post encompass special and emoticons :") print(" ".join(list(set(raw_corpus)))) print("\na cleaning step to remove special caracters and punctuation si necessary") # # Analyses mutltivariées fig, (ax0, ax1, ax2, ax3) = plt.subplots(1, 4, figsize=(12, 4), sharey=True) sns.boxplot(x=df.Score, y=df.tag_unique, showfliers=False, ax=ax0) sns.boxplot(x=df.ViewCount, y=df.tag_unique, showfliers=False, ax=ax1) sns.boxplot(x=df.AnswerCount, y=df.tag_unique, showfliers=False, ax=ax2) sns.boxplot(x=df.number_words, y=df.tag_unique, showfliers=False, ax=ax3) fig.subplots_adjust(wspace=0.8) fig.suptitle("ANOVA analysis for tag_unique") for quantit_var in ["Score", "ViewCount", "AnswerCount", "number_words"]: path = quantit_var + str("~tag_unique") to_test = df.copy() fit_model = smf.ols(path, data=to_test).fit() print("\n---->ANOVA_LM :", quantit_var, "\n", sm.stats.anova_lm(fit_model)) print( "\n there is a statistical significant between the tags and their effect on Score/ViewCount/AnswerCount" ) corr = df.corr() mask = np.zeros_like(corr) mask[np.triu_indices_from(mask)] = True fig, ax = plt.subplots(1, 1, figsize=(5, 4)) ax = sns.heatmap(corr, mask=mask, vmax=1, cmap="summer", annot=True, fmt=".2f") print("there is no correlation between messsage lenght and tags") # ## cleaning text # remove websites def remove_URL(text): """ Remove URLs from a sample string """ return re.sub(r"https?://\S+|www\.\S+", "", text) def remove_non_ascii(text): """ Remove non-ASCII characters """ return re.sub(r"[^\x00-\x7f]", r"", text) def remove_punct(text): """ Remove the punctuation """ return re.sub( r'[]<!"$%&\'()*,/:;=@?>[\\^_`{|}~-]+', "", text ) # keep "." 
and "#" and "+" to keep the target intact return text.translate(str.maketrans("", "", string.punctuation)) def remove_emojis(text): emoj = re.compile( "[" "\U0001F600-\U0001F64F" # emoticons "\U0001F300-\U0001F5FF" # symbols & pictographs "\U0001F680-\U0001F6FF" # transport & map symbols "\U0001F1E0-\U0001F1FF" # flags (iOS) "\U00002500-\U00002BEF" # chinese char "\U00002702-\U000027B0" "\U00002702-\U000027B0" "\U000024C2-\U0001F251" "\U0001f926-\U0001f937" "\U00010000-\U0010ffff" "\u2640-\u2642" "\u2600-\u2B55" "\u200d" "\u23cf" "\u23e9" "\u231a" "\ufe0f" # dingbats "\u3030" "]+", re.UNICODE, ) return re.sub(emoj, "", text) df.Body = df.Body.apply(remove_URL) print("URL removed") df.Body = df.Body.apply(remove_non_ascii) print("non-ASCII removed") df.Body = df.Body.apply(remove_punct) print("punctuation removed") df.Body = df.Body.apply(remove_emojis) print("emojis removed") df.Title = ( df.Title.apply(remove_URL) .apply(remove_non_ascii) .apply(remove_punct) .apply(remove_emojis) ) # # Pre-process text # ## tockenize mannually and bag of words # Here we use split() instead of nltk.word_tockenize def make_corpus(list_of_lists): """make a corpus from a list of list att : list of list""" return [word for word in list_of_lists.apply(lambda x: x.split()) for word in word] def tags_bag_word(make_corpus_fct, stopwords=None): """plot 4 random bag-of-words with 'Body' column from 'df' dataframe using the 'tags_to_keep' list of word to match the 'tag_unique' att : fct to make corpus from list of list""" for tag in random.sample(tags_to_keep, 4): corpus = make_corpus_fct(df.loc[df.tag_unique == tag, "Body"]) wordcloud = WordCloud( background_color="white", stopwords=stopwords, max_words=100 ).generate(" ".join(corpus)) plt.title("For the tag " + tag) plt.imshow(wordcloud) plt.axis("off") plt.show() tags_bag_word(make_corpus) # ### there are words like "new", "use", "self" ... 
need to be removed # # creating stopwords sets # We need to remove token that appears to many times in all targets and those appearing too little.\ # # tokenize corpus all_tokens = nltk.word_tokenize("".join(df.Body.values).lower()) all_tokens_count = Counter(all_tokens) print(f"the corpus countain {len(all_tokens_count)} uniques word") # create stop word for token appearing too little at least 10 times min_freq_tokens = [] for token, count in all_tokens_count.most_common(): if count < 6: min_freq_tokens.append(token) stopwords_min_freq = set(min_freq_tokens) print(f"the corpus countain {len(stopwords_min_freq)} tokens used 5 times or less") # tokenize corpus per tag max_freq_tokens = [] for tag in tags_to_keep: print(f"considering the {tag} tag") df_temp = df.loc[df.tag_unique == tag] tokens_tag = nltk.word_tokenize("".join(df_temp.Body.values).lower()) print(f"there are {len(set(tokens_tag))} unique tokens") max_freq_tokens.append([i for i, k in Counter(tokens_tag).most_common(200)]) words_max_freq = Counter([word for vec in max_freq_tokens for word in vec]) stopwords_max_freq = set([word for word, count in words_max_freq.items() if count > 5]) print( f"\namoung the 200 most comon token in each categorie, there are {len(stopwords_max_freq)} that are comon to at least 6 categories" ) stopwords = stopwords_min_freq.union(stopwords_max_freq) tags_bag_word(make_corpus, stopwords) # now we have topics clearly identified after removing the most common words, so we'll try LDA to see if topics emerge from themselves print(f"the min_freq stopwords set contains {len(stopwords_min_freq)} words") print(f"the max_freq stopwords set contains {len(stopwords_max_freq)} words") print( f"there are {len(stopwords.intersection(nltk.corpus.stopwords.words('english')))} words already in nltk stopwords set" ) print( f"the nltk stopwords set contains {len(nltk.corpus.stopwords.words('english'))} words" ) # ## final preprocessing def make_tokens(text): return nltk.word_tokenize(text.lower().strip()) def remove_sw_tokens(text, more_stopwords=None): if more_stopwords: stop_words = stopwords.union(nltk.corpus.stopwords.words("english")) else: stop_words = nltk.corpus.stopwords.words("english") return [word for word in text if word not in stopwords] def remove_stopwords_texts(texts): return [ [word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts ] def snow_stemmer(text): snow_stemmer = SnowballStemmer(language="english") return [snow_stemmer.stem(word) for word in text] #!unzip /usr/share/nltk_data/corpora/wordnet.zip -d /usr/share/nltk_data/corpora/ # def lemma_fct(text) : # lemmatizer = WordNetLemmatizer() # return [lemmatizer.lemmatize(word) for word in text] # Apply stemming to the text nlp = spacy.load("en_core_web_sm") def spacy(text): return [ token.lemma_ for token in nlp(text) if not (token.is_stop or token.is_punct) ] df["Body_original"] = df["Body"].copy() df.Body = df.Body.apply(make_tokens) df["Body_nostop"] = df.Body.apply(lambda x: remove_sw_tokens(x)) df["Body_snowstem"] = df.Body_nostop.apply(snow_stemmer) # df['Body_lemma'] = df.Body_nostop.apply(lemma_fct) # df['Body_spacy'] = df.Body_nostop.apply(spcay) # # Export dataset for further analysis df.to_csv( "Cleaned_SOF_OCP_P5.csv", encoding="utf-8", sep="\t", header=True, index=False ) # # unsupervized clusturing with LDA # ### https://towardsdatascience.com/evaluate-topic-model-in-python-latent-dirichlet-allocation-lda-7d57484bb5d0 data_snowstem = df.Body_snowstem.values.tolist() print( "Exemple of sentence after 
wtopwords removal and stemmatization :\n", data_snowstem[:1][0][:30], ) id2word = corpora.Dictionary(data_snowstem) texts = data_snowstem corpus = [id2word.doc2bow(text) for text in data_snowstem] # Term Document Frequency print("The same sentence transformed by the bag of word :\n", corpus[:1][0][:30]) # instantiate the model lda_model = gensim.models.LdaMulticore( corpus=corpus, id2word=id2word, num_topics=10, # as many as there are tags selected random_state=100, chunksize=100, passes=10, per_word_topics=True, ) pprint(lda_model.print_topics()) doc_lda = lda_model[corpus] # Compute Coherence Score coherence_model_lda = CoherenceModel( model=lda_model, texts=data_snowstem, dictionary=id2word, coherence="c_v" ) coherence_lda = coherence_model_lda.get_coherence() print("\n***************\nCoherence Score: ", coherence_lda) # import pyLDAvis.gensim_models as gensimvis import pickle import pyLDAvis from pyLDAvis import gensim as gensimvis import os # Visualize the topics pyLDAvis.enable_notebook() LDAvis_prepared = gensimvis.prepare(lda_model, corpus, id2word, mds="mmds", R=10) LDAvis_prepared # ### obviously the group 7 contains variables that are used when writting the code as an exemple in a post so more preprocessing is needed # # Suppervised clusturing classification # here we need to split the dataset equaly between each categories using stratify # we need to use string of tokens for predictions df["Body_snowstem_tokenstr"] = df["Body_snowstem"].apply(lambda x: " ".join(x)) df["Body_snowstem_tokenstr"].head() # For both bectorizer, the min_df/max_df is used to ignore terms that have a document frequency strictly lower/higher than the given threshold (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. # check matrix size with adjusting vectorizer properties print( "the initial dataset has a shape of\n", CountVectorizer().fit_transform(df["Body_snowstem_tokenstr"]).shape, "\n", ) print("influence of min_df\nsmpls vs nb words") for min_df in np.arange(0.001, 0.1, 0.01): vectorizer = CountVectorizer(min_df=min_df) print(min_df, vectorizer.fit_transform(df["Body_snowstem_tokenstr"]).shape) print("\ninfluence of max_df\nsmpls vs nb words") for max_df in np.arange(0.1, 1, 0.1): vectorizer = CountVectorizer(max_df=max_df) print(max_df, vectorizer.fit_transform(df["Body_snowstem_tokenstr"]).shape) # #### previous cleaning well done because remove enough vocabulary words # **Accuracy**: the set of labels predicted for a sample must exactly match the corresponding set of labels in y_true.\ # **Rand score**: similarity measure between two clusterings by considering all pairs of samples and counting pairs that are assigned in the same or different clusters in the predicted and true clusterings. 
def show_results(y, yhat): return ( "Accuracy: ", accuracy_score(y_test, yhat), "ARI", np.round(metrics.adjusted_rand_score(y_test, yhat), 4), ) clf = MultinomialNB() y = LabelEncoder().fit_transform(df.tag_unique) vectorizer1 = CountVectorizer() X = vectorizer1.fit_transform(df["Body_snowstem_tokenstr"]) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=125, stratify=y ) clf.fit(X_train, y_train) yhat = clf.predict(X_test) show_results(y, yhat) vectorizer2 = TfidfVectorizer() X = vectorizer2.fit_transform(df["Body_snowstem_tokenstr"]) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=125, stratify=y ) clf.fit(X_train, y_train) yhat = clf.predict(X_test) show_results(y, yhat)
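# -----------------------------------------------------------------------------
# Editor's sketch (not part of the original notebook): the two vectorizer runs
# above repeat the split/fit/predict steps by hand, and show_results ignores
# its `y` argument in favour of the outer y_test. A hedged alternative that
# wraps vectorizer and classifier in a scikit-learn Pipeline; it assumes
# df["Body_snowstem_tokenstr"] and df.tag_unique from the cells above are
# still in scope.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder

texts = df["Body_snowstem_tokenstr"]
labels = LabelEncoder().fit_transform(df.tag_unique)
X_tr, X_te, y_tr, y_te = train_test_split(
    texts, labels, test_size=0.33, random_state=125, stratify=labels
)

nb_pipeline = Pipeline([("tfidf", TfidfVectorizer()), ("nb", MultinomialNB())])
nb_pipeline.fit(X_tr, y_tr)
y_pred = nb_pipeline.predict(X_te)
print("macro F1:", round(f1_score(y_te, y_pred, average="macro"), 4))
print(confusion_matrix(y_te, y_pred))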
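# -----------------------------------------------------------------------------
# Editor's sketch (not part of the original notebook): returning to the LDA
# section above, num_topics was fixed at 10 to mirror the number of selected
# tags. A small coherence sweep, reusing `corpus`, `id2word`, and
# `data_snowstem` built earlier; the candidate k values and passes=5 are
# illustrative choices, not from the original code.
import gensim
from gensim.models import CoherenceModel

for k in [5, 8, 10, 12]:
    lda_k = gensim.models.LdaMulticore(
        corpus=corpus, id2word=id2word, num_topics=k, random_state=100, passes=5
    )
    coherence_k = CoherenceModel(
        model=lda_k, texts=data_snowstem, dictionary=id2word, coherence="c_v"
    ).get_coherence()
    print(f"num_topics={k} -> c_v coherence={coherence_k:.4f}")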
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/381/129381354.ipynb
null
null
[{"Id": 129381354, "ScriptId": 37497486, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13956278, "CreationDate": "05/13/2023 09:58:03", "VersionNumber": 16.0, "Title": "Cazelles_R\u00e9mi_1_notebook_exploration_052023", "EvaluationDate": "05/13/2023", "IsChange": false, "TotalLines": 425.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 425.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
5,629
0
5,629
5,629