Dataset columns (name: type, observed length/value range):
file_id: string, length 5 to 9
content: string, length 100 to 5.25M
local_path: string, length 66 to 70
kaggle_dataset_name: string, length 3 to 50
kaggle_dataset_owner: string, length 3 to 20
kversion: string, length 497 to 763
kversion_datasetsources: string, length 71 to 5.46k
dataset_versions: string, length 338 to 235k
datasets: string, length 334 to 371
users: string, length 111 to 264
script: string, length 100 to 5.25M
df_info: string, length 0 to 4.87M
has_data_info: bool, 2 classes
nb_filenames: int64, 0 to 370
retreived_data_description: string, length 0 to 4.44M
script_nb_tokens: int64, 25 to 663k
upvotes: int64, 0 to 1.65k
tokens_description: int64, 25 to 663k
tokens_script: int64, 25 to 663k
69235352
<jupyter_start><jupyter_text>jigsaw_public_baseline_results Kaggle dataset identifier: jigsaw-public-baseline-results <jupyter_code>import pandas as pd df = pd.read_csv('jigsaw-public-baseline-results/submission.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 63812 entries, 0 to 63811 Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 63812 non-null int64 1 toxic 63812 non-null float64 dtypes: float64(1), int64(1) memory usage: 997.2 KB <jupyter_text>Examples: { "id": 0.0, "toxic": 0.038574218800000004 } { "id": 1.0, "toxic": 0.0555419922 } { "id": 2.0, "toxic": 0.515625 } { "id": 3.0, "toxic": 0.0489501953 } <jupyter_script># ## Training Pipeline by [@shonenkov](https://www.kaggle.com/shonenkov) using multi TPU on PyTorch/XLA # Hi everyone! # My name is Alex Shonenkov, I am a researcher in love with NLP and DL. # Recently I have published my ideas about this competition: # - [[TPU-Inference] Super Fast XLMRoberta](https://www.kaggle.com/shonenkov/tpu-inference-super-fast-xlmroberta) # - [NLP Albumentations](https://www.kaggle.com/shonenkov/nlp-albumentations) # - [Hack with Parallel Corpus](https://www.kaggle.com/shonenkov/hack-with-parallel-corpus) # - [Class Balance with PyTorch/XLA](https://www.kaggle.com/shonenkov/class-balance-with-pytorch-xla) # - [open-subtitles-toxic-pseudo-labeling](https://www.kaggle.com/shonenkov/open-subtitles-toxic-pseudo-labeling) # If you haven't seen these kernels and datasets yet, I recommend reading all of them: they will help you better understand this kernel and succeed in the competition :) # ## MAIN IDEA # I spent a lot of time creating a working kernel on Kaggle and tried to optimize it for 16GB RAM, but I was not able to run distributed MULTI TPU training here because my datasets are too big for that. # Here I would like to demonstrate my training pipeline without running it, and also to provide you, my friends, with a prepared Colab notebook that mirrors the Kaggle structure! # So let's start!
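# For reference, the usual PyTorch/XLA bootstrap cell at that time looked roughly like the
# commented commands below; the exact script URL and flags are an assumption and may have
# changed since, which is exactly the install problem the next cell describes:
# !curl -s https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
# !python pytorch-xla-env-setup.py --version nightly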
# # Modified section # This program trains with pytorch, and by making good use of XLA it can also train on Kaggle's TPU for a further speed-up # This can be seen from the added environment variable `os.environ['XLA_USE_BF16'] = "1"` # - bfloat16 reference: https://cloud.google.com/tpu/docs/bfloat16?hl=zh-tw # Simply put, it is a floating-point format that can be used together with TPU computation # # The code below was originally supposed to be handled by the cell above, but its install URL and version were wrong or had even expired, so it could not be installed properly # The fix below lets the later XLA-related code run normally and avoids "cannot be called" errors. # # - XLA (Accelerated Linear Algebra) reference: https://www.tensorflow.org/xla?hl=zh-tw # - Official PyTorch/XLA code: https://github.com/pytorch/xla # Download https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py and name it pytorch-xla-env-setup.py # Install PyTorch with XLA, version nightly, i.e. the latest but not a stable release import numpy as np import pandas as pd import os os.environ["XLA_USE_BF16"] = "1" from glob import glob import torch import torch.nn as nn from torch.utils.data import Dataset, DataLoader from torch.autograd import Variable from torch.utils.data.sampler import SequentialSampler, RandomSampler import sklearn import time import random from datetime import datetime from tqdm import tqdm tqdm.pandas() from transformers import BertModel, BertTokenizer from transformers import XLMRobertaModel, XLMRobertaTokenizer from transformers import AdamW, get_linear_schedule_with_warmup, get_constant_schedule from catalyst.data.sampler import DistributedSamplerWrapper, BalanceClassSampler import gc import re # !pip install nltk > /dev/null import nltk nltk.download("punkt") from nltk import sent_tokenize from pandarallel import pandarallel pandarallel.initialize(nb_workers=4, progress_bar=False) SEED = 42 MAX_LENGTH = 224 BACKBONE_PATH = "xlm-roberta-large" ROOT_PATH = f".." # ROOT_PATH = f'/content/drive/My Drive/jigsaw2020-kaggle-public-baseline' # for colab def seed_everything(seed): random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = True seed_everything(SEED) # ### [NLP Albumentations](https://www.kaggle.com/shonenkov/nlp-albumentations) from nltk import sent_tokenize from random import shuffle import random import albumentations from albumentations.core.transforms_interface import DualTransform, BasicTransform LANGS = { "en": "english", "it": "italian", "fr": "french", "es": "spanish", "tr": "turkish", "ru": "russian", "pt": "portuguese", } def get_sentences(text, lang="en"): return sent_tokenize(text, LANGS.get(lang, "english")) def exclude_duplicate_sentences(text, lang="en"): sentences = [] for sentence in get_sentences(text, lang): sentence = sentence.strip() if sentence not in sentences: sentences.append(sentence) return " ".join(sentences) def clean_text(text, lang="en"): text = str(text) text = re.sub(r'[0-9"]', "", text) text = re.sub(r"#[\S]+\b", "", text) text = re.sub(r"@[\S]+\b", "", text) text = re.sub(r"https?\S+", "", text) text = re.sub(r"\s+", " ", text) text = exclude_duplicate_sentences(text, lang) return text.strip() class NLPTransform(BasicTransform): """Transform for nlp task.""" @property def targets(self): return {"data": self.apply} def update_params(self, params, **kwargs): if hasattr(self, "interpolation"): params["interpolation"] = self.interpolation if hasattr(self, "fill_value"): params["fill_value"] = self.fill_value return params def get_sentences(self, text, lang="en"): return sent_tokenize(text, LANGS.get(lang, "english")) class ShuffleSentencesTransform(NLPTransform): """Do shuffle by sentence""" def __init__(self, always_apply=False, p=0.5): super(ShuffleSentencesTransform, self).__init__(always_apply, p) def 
apply(self, data, **params): text, lang = data sentences = self.get_sentences(text, lang) random.shuffle(sentences) return " ".join(sentences), lang class ExcludeDuplicateSentencesTransform(NLPTransform): """Exclude equal sentences""" def __init__(self, always_apply=False, p=0.5): super(ExcludeDuplicateSentencesTransform, self).__init__(always_apply, p) def apply(self, data, **params): text, lang = data sentences = [] for sentence in self.get_sentences(text, lang): sentence = sentence.strip() if sentence not in sentences: sentences.append(sentence) return " ".join(sentences), lang class ExcludeNumbersTransform(NLPTransform): """exclude any numbers""" def __init__(self, always_apply=False, p=0.5): super(ExcludeNumbersTransform, self).__init__(always_apply, p) def apply(self, data, **params): text, lang = data text = re.sub(r"[0-9]", "", text) text = re.sub(r"\s+", " ", text) return text, lang class ExcludeHashtagsTransform(NLPTransform): """Exclude any hashtags with #""" def __init__(self, always_apply=False, p=0.5): super(ExcludeHashtagsTransform, self).__init__(always_apply, p) def apply(self, data, **params): text, lang = data text = re.sub(r"#[\S]+\b", "", text) text = re.sub(r"\s+", " ", text) return text, lang class ExcludeUsersMentionedTransform(NLPTransform): """Exclude @users""" def __init__(self, always_apply=False, p=0.5): super(ExcludeUsersMentionedTransform, self).__init__(always_apply, p) def apply(self, data, **params): text, lang = data text = re.sub(r"@[\S]+\b", "", text) text = re.sub(r"\s+", " ", text) return text, lang class ExcludeUrlsTransform(NLPTransform): """Exclude urls""" def __init__(self, always_apply=False, p=0.5): super(ExcludeUrlsTransform, self).__init__(always_apply, p) def apply(self, data, **params): text, lang = data text = re.sub(r"https?\S+", "", text) text = re.sub(r"\s+", " ", text) return text, lang # ### [Pseudo-labeling with open-subtitles](https://www.kaggle.com/shonenkov/hack-with-parallel-corpus) # More noise with mix of languages can help. I have used [pseudo-labeled open-subtitles dataset](https://www.kaggle.com/shonenkov/open-subtitles-toxic-pseudo-labeling) for this approach. 
# It is some analogue for Cutmix in Computer Vision: # class SynthesicOpenSubtitlesTransform(NLPTransform): def __init__(self, always_apply=False, p=0.5): super(SynthesicOpenSubtitlesTransform, self).__init__(always_apply, p) df = pd.read_csv( f"{ROOT_PATH}/input/open-subtitles-toxic-pseudo-labeling/open-subtitles-synthesic.csv", index_col="id", )[["comment_text", "toxic", "lang"]] df = df[~df["comment_text"].isna()] df["comment_text"] = df.parallel_apply( lambda x: clean_text(x["comment_text"], x["lang"]), axis=1 ) df = df.drop_duplicates(subset="comment_text") df["toxic"] = df["toxic"].round().astype(np.int) self.synthesic_toxic = df[df["toxic"] == 1].comment_text.values self.synthesic_non_toxic = df[df["toxic"] == 0].comment_text.values del df gc.collect() def generate_synthesic_sample(self, text, toxic): texts = [text] if toxic == 0: for i in range(random.randint(1, 5)): texts.append(random.choice(self.synthesic_non_toxic)) else: for i in range(random.randint(0, 2)): texts.append(random.choice(self.synthesic_non_toxic)) for i in range(random.randint(1, 3)): texts.append(random.choice(self.synthesic_toxic)) random.shuffle(texts) return " ".join(texts) def apply(self, data, **params): text, toxic = data text = self.generate_synthesic_sample(text, toxic) return text, toxic def get_train_transforms(): return albumentations.Compose( [ ExcludeUsersMentionedTransform(p=0.95), ExcludeUrlsTransform(p=0.95), ExcludeNumbersTransform(p=0.95), ExcludeHashtagsTransform(p=0.95), ExcludeDuplicateSentencesTransform(p=0.95), ], p=1.0, ) def get_synthesic_transforms(): return SynthesicOpenSubtitlesTransform(p=0.5) train_transforms = get_train_transforms() synthesic_transforms = get_synthesic_transforms() tokenizer = XLMRobertaTokenizer.from_pretrained(BACKBONE_PATH) shuffle_transforms = ShuffleSentencesTransform(always_apply=True) def onehot(size, target): vec = torch.zeros(size, dtype=torch.float32) vec[target] = 1.0 return vec class DatasetRetriever(Dataset): def __init__( self, labels_or_ids, comment_texts, langs, use_train_transforms=False, test=False, ): self.test = test self.labels_or_ids = labels_or_ids self.comment_texts = comment_texts self.langs = langs self.use_train_transforms = use_train_transforms def get_tokens(self, text): encoded = tokenizer.encode_plus( text, add_special_tokens=True, max_length=MAX_LENGTH, pad_to_max_length=True ) return encoded["input_ids"], encoded["attention_mask"] def __len__(self): return self.comment_texts.shape[0] def __getitem__(self, idx): text = self.comment_texts[idx] lang = self.langs[idx] if self.test is False: label = self.labels_or_ids[idx] target = onehot(2, label) if self.use_train_transforms: text, _ = train_transforms(data=(text, lang))["data"] tokens, attention_mask = self.get_tokens(str(text)) token_length = sum(attention_mask) if token_length > 0.8 * MAX_LENGTH: text, _ = shuffle_transforms(data=(text, lang))["data"] elif token_length < 60: text, _ = synthesic_transforms(data=(text, label))["data"] else: tokens, attention_mask = torch.tensor(tokens), torch.tensor( attention_mask ) return target, tokens, attention_mask tokens, attention_mask = self.get_tokens(str(text)) tokens, attention_mask = torch.tensor(tokens), torch.tensor(attention_mask) if self.test is False: return target, tokens, attention_mask return self.labels_or_ids[idx], tokens, attention_mask def get_labels(self): return list(np.char.add(self.labels_or_ids.astype(str), self.langs)) # ### Here I have used [this kernel](https://www.kaggle.com/shonenkov/prepare-training-data) for 
merging all train data df_train = pd.read_csv( f"{ROOT_PATH}/input/jigsaw-public-baseline-train-data/train_data.csv" ) train_dataset = DatasetRetriever( labels_or_ids=df_train["toxic"].values, comment_texts=df_train["comment_text"].values, langs=df_train["lang"].values, use_train_transforms=True, ) del df_train gc.collect() for targets, tokens, attention_masks in train_dataset: break print(targets) print(tokens.shape) print(attention_masks.shape) # ### Class Balance # After some experiments I have decided that [class balance](https://www.kaggle.com/shonenkov/class-balance-with-pytorch-xla) in this competition is very important. Also I noticed impact if use balancing dataset by languages. # Here you can see unique values for get_labels method: np.unique(train_dataset.get_labels()) df_val = pd.read_csv( f"{ROOT_PATH}/input/jigsaw-multilingual-toxic-comment-classification/validation.csv", index_col="id", ) validation_tune_dataset = DatasetRetriever( labels_or_ids=df_val["toxic"].values, comment_texts=df_val["comment_text"].values, langs=df_val["lang"].values, use_train_transforms=True, ) df_val["comment_text"] = df_val.parallel_apply( lambda x: clean_text(x["comment_text"], x["lang"]), axis=1 ) validation_dataset = DatasetRetriever( labels_or_ids=df_val["toxic"].values, comment_texts=df_val["comment_text"].values, langs=df_val["lang"].values, use_train_transforms=False, ) del df_val gc.collect() for targets, tokens, attention_masks in validation_dataset: break print(targets) print(tokens.shape) print(attention_masks.shape) df_test = pd.read_csv( f"{ROOT_PATH}/input/jigsaw-multilingual-toxic-comment-classification/test.csv", index_col="id", ) df_test["comment_text"] = df_test.parallel_apply( lambda x: clean_text(x["content"], x["lang"]), axis=1 ) test_dataset = DatasetRetriever( labels_or_ids=df_test.index.values, comment_texts=df_test["comment_text"].values, langs=df_test["lang"].values, use_train_transforms=False, test=True, ) del df_test gc.collect() for ids, tokens, attention_masks in test_dataset: break print(ids) print(tokens.shape) print(attention_masks.shape) class RocAucMeter(object): def __init__(self): self.reset() def reset(self): self.y_true = np.array([0, 1]) self.y_pred = np.array([0.5, 0.5]) self.score = 0 def update(self, y_true, y_pred): y_true = y_true.cpu().numpy().argmax(axis=1) y_pred = nn.functional.softmax(y_pred, dim=1).data.cpu().numpy()[:, 1] self.y_true = np.hstack((self.y_true, y_true)) self.y_pred = np.hstack((self.y_pred, y_pred)) self.score = sklearn.metrics.roc_auc_score( self.y_true, self.y_pred, labels=np.array([0, 1]) ) @property def avg(self): return self.score class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count # ### Label Smoothing is all you need # Now we can use translating and augmenting data for training with this Loss: class LabelSmoothing(nn.Module): def __init__(self, smoothing=0.1): super(LabelSmoothing, self).__init__() self.confidence = 1.0 - smoothing self.smoothing = smoothing def forward(self, x, target): if self.training: x = x.float() target = target.float() logprobs = torch.nn.functional.log_softmax(x, dim=-1) nll_loss = -logprobs * target nll_loss = nll_loss.sum(-1) smooth_loss = -logprobs.mean(dim=-1) loss = self.confidence * nll_loss + self.smoothing * smooth_loss return loss.mean() else: 
return torch.nn.functional.cross_entropy(x, target) # # Custom TPU Fitter # # P.S. Let's go and contribute a TPU backend to [Catalyst](https://github.com/catalyst-team/catalyst) :) # # Section worth noting # The code below imports the `torch_xla` modules; if the installation above did not succeed, the code below cannot run correctly. import warnings warnings.filterwarnings("ignore") import torch_xla import torch_xla.core.xla_model as xm import torch_xla.distributed.parallel_loader as pl import torch_xla.distributed.xla_multiprocessing as xmp from catalyst.data.sampler import DistributedSamplerWrapper, BalanceClassSampler class TPUFitter: def __init__(self, model, device, config): if not os.path.exists("node_submissions"): os.makedirs("node_submissions") self.config = config self.epoch = 0 self.log_path = "log.txt" self.model = model self.device = device param_optimizer = list(self.model.named_parameters()) no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in param_optimizer if not any(nd in n for nd in no_decay) ], "weight_decay": 0.001, }, { "params": [ p for n, p in param_optimizer if any(nd in n for nd in no_decay) ], "weight_decay": 0.0, }, ] self.optimizer = AdamW( optimizer_grouped_parameters, lr=config.lr * xm.xrt_world_size() ) self.scheduler = config.SchedulerClass( self.optimizer, **config.scheduler_params ) self.criterion = config.criterion xm.master_print(f"Fitter prepared. Device is {self.device}") def fit(self, train_loader, validation_loader): for e in range(self.config.n_epochs): if self.config.verbose: lr = self.optimizer.param_groups[0]["lr"] timestamp = datetime.utcnow().isoformat() self.log(f"\n{timestamp}\nLR: {lr}") t = time.time() para_loader = pl.ParallelLoader(train_loader, [self.device]) losses, final_scores = self.train_one_epoch( para_loader.per_device_loader(self.device) ) self.log( f"[RESULT]: Train. Epoch: {self.epoch}, loss: {losses.avg:.5f}, final_score: {final_scores.avg:.5f}, time: {(time.time() - t):.5f}" ) t = time.time() para_loader = pl.ParallelLoader(validation_loader, [self.device]) losses, final_scores = self.validation( para_loader.per_device_loader(self.device) ) self.log( f"[RESULT]: Validation. 
Epoch: {self.epoch}, loss: {losses.avg:.5f}, final_score: {final_scores.avg:.5f}, time: {(time.time() - t):.5f}" ) if self.config.validation_scheduler: self.scheduler.step(metrics=final_scores.avg) self.epoch += 1 def run_tuning_and_inference(self, test_loader, validation_tune_loader): for e in range(2): self.optimizer.param_groups[0]["lr"] = ( self.config.lr * xm.xrt_world_size() / (e + 1) ) para_loader = pl.ParallelLoader(validation_tune_loader, [self.device]) losses, final_scores = self.train_one_epoch( para_loader.per_device_loader(self.device) ) para_loader = pl.ParallelLoader(test_loader, [self.device]) self.run_inference(para_loader.per_device_loader(self.device)) def validation(self, val_loader): self.model.eval() losses = AverageMeter() final_scores = RocAucMeter() t = time.time() for step, (targets, inputs, attention_masks) in enumerate(val_loader): if self.config.verbose: if step % self.config.verbose_step == 0: xm.master_print( f"Valid Step {step}, loss: " + f"{losses.avg:.5f}, final_score: {final_scores.avg:.5f}, " + f"time: {(time.time() - t):.5f}" ) with torch.no_grad(): inputs = inputs.to(self.device, dtype=torch.long) attention_masks = attention_masks.to(self.device, dtype=torch.long) targets = targets.to(self.device, dtype=torch.float) outputs = self.model(inputs, attention_masks) loss = self.criterion(outputs, targets) batch_size = inputs.size(0) final_scores.update(targets, outputs) losses.update(loss.detach().item(), batch_size) return losses, final_scores def train_one_epoch(self, train_loader): self.model.train() losses = AverageMeter() final_scores = RocAucMeter() t = time.time() for step, (targets, inputs, attention_masks) in enumerate(train_loader): if self.config.verbose: if step % self.config.verbose_step == 0: self.log( f"Train Step {step}, loss: " + f"{losses.avg:.5f}, final_score: {final_scores.avg:.5f}, " + f"time: {(time.time() - t):.5f}" ) inputs = inputs.to(self.device, dtype=torch.long) attention_masks = attention_masks.to(self.device, dtype=torch.long) targets = targets.to(self.device, dtype=torch.float) self.optimizer.zero_grad() outputs = self.model(inputs, attention_masks) loss = self.criterion(outputs, targets) batch_size = inputs.size(0) final_scores.update(targets, outputs) losses.update(loss.detach().item(), batch_size) loss.backward() xm.optimizer_step(self.optimizer) if self.config.step_scheduler: self.scheduler.step() self.model.eval() self.save("last-checkpoint.bin") return losses, final_scores def run_inference(self, test_loader): self.model.eval() result = {"id": [], "toxic": []} t = time.time() for step, (ids, inputs, attention_masks) in enumerate(test_loader): if self.config.verbose: if step % self.config.verbose_step == 0: xm.master_print( f"Prediction Step {step}, time: {(time.time() - t):.5f}" ) with torch.no_grad(): inputs = inputs.to(self.device, dtype=torch.long) attention_masks = attention_masks.to(self.device, dtype=torch.long) outputs = self.model(inputs, attention_masks) toxics = nn.functional.softmax(outputs, dim=1).data.cpu().numpy()[:, 1] result["id"].extend(ids.cpu().numpy()) result["toxic"].extend(toxics) result = pd.DataFrame(result) node_count = len(glob("node_submissions/*.csv")) result.to_csv( f"node_submissions/submission_{node_count}_{datetime.utcnow().microsecond}_{random.random()}.csv", index=False, ) def save(self, path): xm.save(self.model.state_dict(), path) def log(self, message): if self.config.verbose: xm.master_print(message) with open(self.log_path, "a+") as logger: xm.master_print(f"{message}", logger) # 
### Model from transformers import XLMRobertaModel class ToxicSimpleNNModel(nn.Module): def __init__(self): super(ToxicSimpleNNModel, self).__init__() self.backbone = XLMRobertaModel.from_pretrained(BACKBONE_PATH) self.dropout = nn.Dropout(0.3) self.linear = nn.Linear( in_features=self.backbone.pooler.dense.out_features * 2, out_features=2, ) def forward(self, input_ids, attention_masks): bs, seq_length = input_ids.shape seq_x, _ = self.backbone(input_ids=input_ids, attention_mask=attention_masks) apool = torch.mean(seq_x, 1) mpool, _ = torch.max(seq_x, 1) x = torch.cat((apool, mpool), 1) x = self.dropout(x) return self.linear(x) net = ToxicSimpleNNModel() # # Custom Config class TrainGlobalConfig: num_workers = 0 batch_size = 16 n_epochs = 3 lr = 0.5 * 1e-5 # ------------------- verbose = True verbose_step = 50 # ------------------- # -------------------- step_scheduler = False # do scheduler.step after optimizer.step validation_scheduler = True # do scheduler.step after validation stage loss SchedulerClass = torch.optim.lr_scheduler.ReduceLROnPlateau scheduler_params = dict( mode="max", factor=0.7, patience=0, verbose=False, threshold=0.0001, threshold_mode="abs", cooldown=0, min_lr=1e-8, eps=1e-08, ) # -------------------- # ------------------- criterion = LabelSmoothing() # ------------------- # ### Main method def _mp_fn(rank, flags): device = xm.xla_device() net.to(device) train_sampler = DistributedSamplerWrapper( sampler=BalanceClassSampler( labels=train_dataset.get_labels(), mode="downsampling" ), num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal(), shuffle=True, ) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=TrainGlobalConfig.batch_size, sampler=train_sampler, pin_memory=False, drop_last=True, num_workers=TrainGlobalConfig.num_workers, ) validation_sampler = torch.utils.data.distributed.DistributedSampler( validation_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal(), shuffle=False, ) validation_loader = torch.utils.data.DataLoader( validation_dataset, batch_size=TrainGlobalConfig.batch_size, sampler=validation_sampler, pin_memory=False, drop_last=False, num_workers=TrainGlobalConfig.num_workers, ) validation_tune_sampler = torch.utils.data.distributed.DistributedSampler( validation_tune_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal(), shuffle=True, ) validation_tune_loader = torch.utils.data.DataLoader( validation_tune_dataset, batch_size=TrainGlobalConfig.batch_size, sampler=validation_tune_sampler, pin_memory=False, drop_last=False, num_workers=TrainGlobalConfig.num_workers, ) test_sampler = torch.utils.data.distributed.DistributedSampler( test_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal(), shuffle=False, ) test_loader = torch.utils.data.DataLoader( test_dataset, batch_size=TrainGlobalConfig.batch_size, sampler=test_sampler, pin_memory=False, drop_last=False, num_workers=TrainGlobalConfig.num_workers, ) if rank == 0: time.sleep(1) fitter = TPUFitter(model=net, device=device, config=TrainGlobalConfig) fitter.fit(train_loader, validation_loader) fitter.run_tuning_and_inference(test_loader, validation_tune_loader) # ## Colab Notebook # I hope Kaggle Team will increase RAM memory for tpu notebook as soon as possible. But now I recommend you use colab pro with HIGH RAM mode :) # [Here](https://drive.google.com/drive/folders/1hbcSRfvtTTlERs7remsRST2amIWAFVry?usp=sharing) I have created public read-only google drive with colab notebook! You can save copy and start training right now! 
# Also, you can run this code here with nprocs=1 if you need to. It works, but it is very slow (~1.5 P100). # FLAGS={} # xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=8, start_method='fork') # submission = pd.concat([pd.read_csv(path) for path in glob('node_submissions/*.csv')]).groupby('id').mean() # submission['toxic'].hist(bins=100) # Let's imagine that these logs were obtained on Kaggle: file = open("../input/jigsaw-public-baseline-results/log.txt", "r") for line in file.readlines(): print(line[:-1]) file.close() # This model should be trained for ~10 epochs; I have run only 3 epochs for this kernel. # ## Submission # If you want to get a high score of ~0.945-0.946, such as [[TPU-Inference] Super Fast XLMRoberta](https://www.kaggle.com/shonenkov/tpu-inference-super-fast-xlmroberta), you should do a blend such as [here](https://www.kaggle.com/hamditarek/ensemble) (a rough blend sketch follows the submission cell below), but I would like to make a submission with only this kernel submission = pd.read_csv( "../input/jigsaw-public-baseline-results/submission.csv", index_col="id" ) submission.hist(bins=100) submission.to_csv("submission.csv")
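# A minimal blend sketch, assuming several submission files are available locally
# (the file names below are hypothetical placeholders, not outputs of this kernel):
# sub_a = pd.read_csv("submission_xlmr.csv", index_col="id")
# sub_b = pd.read_csv("submission_other.csv", index_col="id")
# blend = sub_a.copy()
# blend["toxic"] = 0.5 * sub_a["toxic"].rank(pct=True) + 0.5 * sub_b["toxic"].rank(pct=True)
# blend.to_csv("blended_submission.csv")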
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/235/69235352.ipynb
jigsaw-public-baseline-results
shonenkov
[{"Id": 69235352, "ScriptId": 18893585, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1770576, "CreationDate": "07/28/2021 12:46:33", "VersionNumber": 2.0, "Title": "[TPU-Training] Super Fast XLMRoberta", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 836.0, "LinesInsertedFromPrevious": 17.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 819.0, "LinesInsertedFromFork": 20.0, "LinesDeletedFromFork": 0.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 816.0, "TotalVotes": 0}]
[{"Id": 92157153, "KernelVersionId": 69235352, "SourceDatasetVersionId": 1130062}, {"Id": 92157151, "KernelVersionId": 69235352, "SourceDatasetVersionId": 1126366}, {"Id": 92157152, "KernelVersionId": 69235352, "SourceDatasetVersionId": 1127705}]
[{"Id": 1130062, "DatasetId": 635824, "DatasourceVersionId": 1160562, "CreatorUserId": 1920073, "LicenseName": "Unknown", "CreationDate": "05/04/2020 19:20:28", "VersionNumber": 1.0, "Title": "jigsaw_public_baseline_results", "Slug": "jigsaw-public-baseline-results", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 635824, "CreatorUserId": 1920073, "OwnerUserId": 1920073.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1130062.0, "CurrentDatasourceVersionId": 1160562.0, "ForumId": 650092, "Type": 2, "CreationDate": "05/04/2020 19:20:28", "LastActivityDate": "05/04/2020", "TotalViews": 1006, "TotalDownloads": 229, "TotalVotes": 5, "TotalKernels": 3}]
[{"Id": 1920073, "UserName": "shonenkov", "DisplayName": "Alex Shonenkov", "RegisterDate": "05/17/2018", "PerformanceTier": 4}]
[{"jigsaw-public-baseline-results/submission.csv": {"column_names": "[\"id\", \"toxic\"]", "column_data_types": "{\"id\": \"int64\", \"toxic\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 63812 entries, 0 to 63811\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 63812 non-null int64 \n 1 toxic 63812 non-null float64\ndtypes: float64(1), int64(1)\nmemory usage: 997.2 KB\n", "summary": "{\"id\": {\"count\": 63812.0, \"mean\": 31905.5, \"std\": 18421.082025766023, \"min\": 0.0, \"25%\": 15952.75, \"50%\": 31905.5, \"75%\": 47858.25, \"max\": 63811.0}, \"toxic\": {\"count\": 63812.0, \"mean\": 0.286833913350325, \"std\": 0.3108044614098088, \"min\": 0.0352783203125, \"25%\": 0.0543212890625, \"50%\": 0.095458984375, \"75%\": 0.484375, \"max\": 0.96875}}", "examples": "{\"id\":{\"0\":0,\"1\":1,\"2\":2,\"3\":3},\"toxic\":{\"0\":0.0385742188,\"1\":0.0555419922,\"2\":0.515625,\"3\":0.0489501953}}"}}]
true
1
<start_data_description><data_path>jigsaw-public-baseline-results/submission.csv: <column_names> ['id', 'toxic'] <column_types> {'id': 'int64', 'toxic': 'float64'} <dataframe_Summary> {'id': {'count': 63812.0, 'mean': 31905.5, 'std': 18421.082025766023, 'min': 0.0, '25%': 15952.75, '50%': 31905.5, '75%': 47858.25, 'max': 63811.0}, 'toxic': {'count': 63812.0, 'mean': 0.286833913350325, 'std': 0.3108044614098088, 'min': 0.0352783203125, '25%': 0.0543212890625, '50%': 0.095458984375, '75%': 0.484375, 'max': 0.96875}} <dataframe_info> RangeIndex: 63812 entries, 0 to 63811 Data columns (total 2 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 63812 non-null int64 1 toxic 63812 non-null float64 dtypes: float64(1), int64(1) memory usage: 997.2 KB <some_examples> {'id': {'0': 0, '1': 1, '2': 2, '3': 3}, 'toxic': {'0': 0.0385742188, '1': 0.0555419922, '2': 0.515625, '3': 0.0489501953}} <end_description>
8,405
0
8,723
8,405
69235254
import spacy spacy.__version__ import fastai fastai.__version__ # ## Preparing for inference # ### Preparing the data for inference import glob import numpy from fastai.text.all import * df_test = pd.read_csv("../input/commonlitreadabilityprize/test.csv") df_test.head(3) df_test.shape # Getting the list of ids. This would later be used to create the submission file ids = df_test.id.to_list() ids[:2] # Getting the excerpts into a list. This makes it easier to make predictions and later to create the submission file. excerpts = df_test.excerpt.to_list() # ## Loading the model bkwd_model = load_learner( "../input/common-lit-forward-backward-training-on-ulmfit/back_final_model" ) fwd_model = load_learner( "../input/common-lit-forward-backward-training-on-ulmfit/forward_final_model" ) # # Predictions def ensamble(bkwd_model, fwd_model, excerpts): preds = [] for excerpt in excerpts: avg_pred = ( torch.cat( [(bkwd_model.predict(excerpt))[1], (fwd_model.predict(excerpt))[1]] ) ).mean() preds.append(avg_pred) return preds ensamble(bkwd_model, fwd_model, [excerpts[0]]) # predictions = [pred.item() for pred in ensamble(bkwd_model, fwd_model, excerpts)] predictions = [ float("{0:.2f}".format(pred.item())) for pred in ensamble(bkwd_model, fwd_model, excerpts) ] # predictions = [float("{0:.2f}".format(model.predict(excerpt)[0][0])) for excerpt in excerpts] predictions[:2] # # Creating the submission file # Creating a new dataframe submission_df = pd.DataFrame(list(zip(ids, predictions)), columns=["id", "target"]) # Saving the dataframe as csv for submission. submission_df.head(2) submission_df.to_csv("submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/235/69235254.ipynb
null
null
[{"Id": 69235254, "ScriptId": 18894513, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1174372, "CreationDate": "07/28/2021 12:45:21", "VersionNumber": 3.0, "Title": "Commonlit - inference- Enseamble-fwd-bkwd-lm", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 68.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 66.0, "LinesInsertedFromFork": 26.0, "LinesDeletedFromFork": 4.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 42.0, "TotalVotes": 0}]
null
null
null
null
69235929
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # **IMPORT LIBRARIES** import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from scipy import stats from scipy.stats import norm import warnings warnings.filterwarnings("ignore") # # **IMPORT DATASET** train_df = pd.read_csv("../input/home-data-for-ml-course/train.csv", index_col="Id") test_df = pd.read_csv("../input/home-data-for-ml-course/test.csv", index_col="Id") train_df.shape # There are a total of *1460* training examples, having 80 features and 1 target column, i.e. *SalePrice*. train_df.info() # A lot of missing values in *Alley*, *PoolQC*, *Fence*, and *MiscFeature*. Thus, it would be reasonable to remove these features. train_df.drop(["Alley", "PoolQC", "Fence", "MiscFeature"], axis=1, inplace=True) test_df.drop(["Alley", "PoolQC", "Fence", "MiscFeature"], axis=1, inplace=True) pd.set_option("display.max_columns", 100) train_df.describe() train_df.head(10) # # **1. EXPLORATORY DATA ANALYSIS** # Classifying columns as *categorical* and *numerical*: def col_type(col): if (col.nunique() <= 10) | (col.dtype == "object"): return "cat" else: return "num" categorical_col = [ cname for cname in train_df.columns if col_type(train_df[cname]) == "cat" ] numerical_col = [ cname for cname in train_df.columns if col_type(train_df[cname]) == "num" ] print("Categorical Columns: {}".format(categorical_col)) print("Numerical Columns: {}".format(numerical_col)) # However, owing to very few number of houses with a pool, *PoolArea* got wrongly classified as a categorical feature. numerical_col.append("poolArea") categorical_col.remove("PoolArea") corrMat = train_df.corr() f, ax = plt.subplots(figsize=(20, 15)) cmap = sns.diverging_palette(230, 20, as_cmap=True) mask = np.triu(np.ones_like(corrMat, dtype=bool)) sns.heatmap(corrMat, square=True, annot=True, cmap=cmap, mask=mask) # Let's go with the following rules of thumb: # **1.** Any two predictor variables having correlation coefficient greater than or equal to 0.7 can be said to have high positive correlation, and thus, any one of the two could be removed. # **2.** Same goes for pairs of predictor variables having correlation coefficient less than or equal to -0.7. # **3.** Predictor variables having correlation greater than 0.6 or less than -0.6 with the target *SalePrice* can be labeled as important predictors/features. related = corrMat[corrMat.abs() >= 0.6] plt.figure(figsize=(20, 15)) mask = np.triu(np.ones_like(related, dtype=bool)) sns.heatmap(related, annot=True, square=True, mask=mask) # From the above plot: # **1.** Pairs *YearBuilt* & *GarageYrBlt*, *GarageArea* & *GarageCars*, *TotRmsAbvGrd* & *GrLivArea*, *2ndFlrSF* & *GrLivArea*, *1stFlrSF* & *TotalBsmtSF* have high correlations and hence one from each pair can be removed. Say, we will remove *GarageYrBlt*, *GarageCars*, *TotRmsAbvGrd*, and *1stFlrSF*. 
# **2.** Predictors *OverallQual*, *GrLivArea*, *TotalBsmtSF*, and *GarageArea* have high correlation with target *SalePrice*. Hence, these are our important features. removables = ["GarageYrBlt", "GarageCars", "TotRmsAbvGrd", "1stFlrSF", "2ndFlrSF"] categorical_col.remove("GarageCars") # Now, we are going to look for predictors that have very low correlation with the target *SalePrice*, i.e. features that have little to no effect on our target variable. # For this, we will look into our correlation matrix and extract only those features that have correlation coefficient value between -0.1 and 0.1 with the target *SalePrice*. # **Note:** Dropping features based on their feature-to-target-correlation essentially is a form of feature filtering. It is important to understand that feature filtering does not necessarily improve predictive performance. This is because for complex regression models, these features might get coupled with other features and contribute to the predictive performance of the model. # For the above reason, we are not going to remove these features from the training set just yet. Instead, we will observe how a particular model would perform with and without these features. Then we can take that result to determine whether or not to keep these features. plt.figure(figsize=(5, 15)) S = corrMat[["SalePrice"]] sns.heatmap(S, annot=True) lowCorrFeatures = [] for col in corrMat.index: if (col in numerical_col) & (-0.1 <= corrMat[col]["SalePrice"] <= 0.1): lowCorrFeatures.append(col) print(lowCorrFeatures) # removing all the removable predictors from dataframe train_df.drop(removables, axis=1, inplace=True) test_df.drop(removables, axis=1, inplace=True) train_df.drop(lowCorrFeatures, axis=1, inplace=True) test_df.drop(lowCorrFeatures, axis=1, inplace=True) train_df.shape # # **2. DATA CLEANING** # Before we jump into data cleaning, it is good practice to first split up the dataset into train and validation sets. from sklearn.model_selection import train_test_split X = train_df.copy() y = X.SalePrice X.drop("SalePrice", axis=1, inplace=True) X_train, X_valid, y_train, y_valid = train_test_split( X, y, test_size=0.2, random_state=42 ) X_test = test_df.copy() # print(X_train.shape, y_train.shape) -> (1168, 70) (1168,) # First of all, determine the amount and type of missing data in the given dataframe. missing_val_cols = X_train.isnull().sum().sort_values(ascending=False) missing_val_cols = missing_val_cols[missing_val_cols > 0] ratio_of_missing = missing_val_cols / X_train.shape[0] missing_cols = pd.concat( [missing_val_cols, ratio_of_missing * 100], axis=1, keys=["Count", "%"] ) missing_cols # For 'Electrical', 'MasVnrArea' and 'MasVnrType', removing the entries with missing values. y_train.drop(X_train.loc[X_train.Electrical.isnull()].index, axis=0, inplace=True) X_train.drop(X_train.loc[X_train.Electrical.isnull()].index, axis=0, inplace=True) # y_train.drop(X_train.loc[X_train.MasVnrArea.isnull()].index, axis = 0, inplace = True) # X_train.drop(X_train.loc[X_train.MasVnrArea.isnull()].index, axis = 0, inplace = True) # y_train.drop(X_train.loc[X_train.MasVnrType.isnull()].index, axis = 0, inplace = True) # X_train.drop(X_train.loc[X_train.MasVnrType.isnull()].index, axis = 0, inplace = True) # For The following features, it's apparent that a missing entry is a direct indication # of absence of the corresponding functionality. Thus, treating NaN as a separate # class 'missing' would be wise. 
X_train["BsmtQual"] = X_train["BsmtQual"].fillna("missing") X_train["BsmtCond"] = X_train["BsmtCond"].fillna("missing") X_train["BsmtExposure"] = X_train["BsmtExposure"].fillna("missing") X_train["BsmtFinType1"] = X_train["BsmtFinType1"].fillna("missing") X_train["BsmtFinType2"] = X_train["BsmtFinType2"].fillna("missing") X_train["GarageCond"] = X_train["GarageCond"].fillna("missing") X_train["GarageQual"] = X_train["GarageQual"].fillna("missing") X_train["GarageFinish"] = X_train["GarageFinish"].fillna("missing") X_train["GarageType"] = X_train["GarageType"].fillna("missing") X_train["MasVnrArea"] = X_train["MasVnrArea"].fillna(0) X_train["MasVnrType"] = X_train["MasVnrType"].fillna("missing") X_train["FireplaceQu"] = X_train["FireplaceQu"].fillna("missing") # Same for test set X_test["BsmtQual"] = X_test["BsmtQual"].fillna("missing") X_test["BsmtCond"] = X_test["BsmtCond"].fillna("missing") X_test["BsmtExposure"] = X_test["BsmtExposure"].fillna("missing") X_test["BsmtFinType1"] = X_test["BsmtFinType1"].fillna("missing") X_test["BsmtFinType2"] = X_test["BsmtFinType2"].fillna("missing") X_test["GarageCond"] = X_test["GarageCond"].fillna("missing") X_test["GarageQual"] = X_test["GarageQual"].fillna("missing") X_test["GarageFinish"] = X_test["GarageFinish"].fillna("missing") X_test["GarageType"] = X_test["GarageType"].fillna("missing") X_test["Electrical"] = X_test["Electrical"].fillna("SBrkr") X_test["MasVnrArea"] = X_test["MasVnrArea"].fillna(0) X_test["MasVnrType"] = X_test["MasVnrType"].fillna("missing") X_test["FireplaceQu"] = X_test["FireplaceQu"].fillna("missing") # For GarageCond, rows with missing field overlaps with rows having missing fields for # other Garage related features. Same goes for BsmtCond. Thus, by deleting just these rows # we can effectively deal with all related missing entries. # X_train.drop(X_train.loc[X_train.GarageCond.isnull()].index, axis = 0, inplace = True) # X_train.drop(X_train.loc[X_train.BsmtCond.isnull()].index, axis = 0, inplace = True) # garage_removables = ['GarageCond', 'GarageType', 'GarageFinish', 'GarageQual'] # removables.append(garage_removables) # X_train.drop(garage_removables, axis = 1, inplace = True) # X_valid.drop(garage_removables, axis = 1, inplace = True) print(X_train.shape) print(y_train.shape) # For *LotFrontage*: # 1. Dropping the column is out the question as in the actual market scenario, measure of linear feet of street connected to the property has significant effect on the price. # 2. For this exact reason, imputing the missing values would result in nonsensical data. # 3. Deleting rows with missing field would mean throwing away about 20% of the training dataset. # We can further confirm the effect of *LotFrontage* on target variable with the help of the following plot: full_train = pd.concat([X_train, y_train], axis=1) plt.figure(figsize=(12, 6)) sns.lmplot(x="LotFrontage", y="SalePrice", data=full_train) plt.show() # As is obvious from the above plot, there is plenty positive correlation between the two. # Therefore, we need to proceed with caution while dealing with this feature. # One way to deal with the situation is to build a model that predicts the missing values of *LotFrontage*. Here, we treat *LotFrontage* as the dependent variable while the other features would be independent variables. # Encoding categorical columns from X_train and storing in new dataframe. temp_df = pd.get_dummies(X_train, columns=categorical_col, drop_first=True) # Dataframe consisting of non-null values of LotFrontage. 
This will serve as training set. df_non_nulls = temp_df[temp_df["LotFrontage"].notnull()].copy() lot_y = df_non_nulls.LotFrontage.copy() lot_X = df_non_nulls.drop("LotFrontage", axis=1, inplace=False) lot_x_train, lot_x_valid, lot_y_train, lot_y_valid = train_test_split( lot_X, lot_y, test_size=0.2, random_state=42 ) # Fitting model on the training set with target LotFrontage. from sklearn.ensemble import RandomForestRegressor rfr = RandomForestRegressor(max_depth=25, min_samples_leaf=2, random_state=0) rfr.fit(lot_x_train, lot_y_train) score = rfr.score(lot_x_valid, lot_y_valid) # print(score) -> 0.6335685958712058 # Dataframe consisting of null values of LotFrontage. This is our test set as here we # don't know a single value of LotFrontage. df_nulls = temp_df[temp_df["LotFrontage"].isnull()].copy() df_nulls.drop("LotFrontage", axis=1, inplace=True) df_nulls["LotFrontage"] = rfr.predict(df_nulls) # Now copying the predicted values of LotFrontage back to the original training set. for idx in df_nulls.index: X_train.loc[X_train.index == idx, "LotFrontage"] = df_nulls.loc[idx]["LotFrontage"] X_train.isnull().any().any() # This concludes the part of dealing with missing values. (**Finally!!**) missing_val_cols_valid = X_valid.isnull().sum().sort_values(ascending=False) missing_val_cols_valid = missing_val_cols_valid[missing_val_cols_valid > 0] ratio_of_missing_valid = missing_val_cols_valid / X_valid.shape[0] missing_cols_valid = pd.concat( [missing_val_cols_valid, ratio_of_missing_valid * 100], axis=1, keys=["Count", "%"] ) missing_cols_valid # Applying the same imputation rules as the training set: y_valid.drop(X_valid.loc[X_valid.MasVnrArea.isnull()].index, axis=0, inplace=True) X_valid.drop(X_valid.loc[X_valid.MasVnrArea.isnull()].index, axis=0, inplace=True) y_valid.drop(X_valid.loc[X_valid.MasVnrType.isnull()].index, axis=0, inplace=True) X_valid.drop(X_valid.loc[X_valid.MasVnrType.isnull()].index, axis=0, inplace=True) X_valid["BsmtQual"] = X_valid["BsmtQual"].fillna("missing") X_valid["BsmtCond"] = X_valid["BsmtCond"].fillna("missing") X_valid["BsmtExposure"] = X_valid["BsmtExposure"].fillna("missing") X_valid["BsmtFinType1"] = X_valid["BsmtFinType1"].fillna("missing") X_valid["BsmtFinType2"] = X_valid["BsmtFinType2"].fillna("missing") X_valid["GarageCond"] = X_valid["GarageCond"].fillna("missing") X_valid["GarageQual"] = X_valid["GarageQual"].fillna("missing") X_valid["GarageFinish"] = X_valid["GarageFinish"].fillna("missing") X_valid["GarageType"] = X_valid["GarageType"].fillna("missing") X_valid["FireplaceQu"] = X_valid["FireplaceQu"].fillna("missing") temp_df_valid = pd.get_dummies(X_valid, columns=categorical_col, drop_first=True) temp_df, temp_df_valid = temp_df.align(temp_df_valid, join="left", axis=1) df_nulls_valid = temp_df_valid[temp_df_valid.LotFrontage.isnull()].copy() df_nulls_valid.drop("LotFrontage", axis=1, inplace=True) df_nulls_valid = df_nulls_valid.fillna(0) df_nulls_valid["LotFrontage"] = rfr.predict(df_nulls_valid) for idx in df_nulls_valid.index: X_valid.loc[X_valid.index == idx, "LotFrontage"] = df_nulls_valid.loc[idx][ "LotFrontage" ] # Same for test set temp_df_test = pd.get_dummies(X_test, columns=categorical_col, drop_first=True) temp_df, temp_df_test = temp_df.align(temp_df_test, join="left", axis=1) df_nulls_test = temp_df_test[temp_df_test.LotFrontage.isnull()].copy() df_nulls_test.drop("LotFrontage", axis=1, inplace=True) df_nulls_test = df_nulls_test.fillna(0) df_nulls_test["LotFrontage"] = rfr.predict(df_nulls_test) for idx in df_nulls_test.index: 
X_test.loc[X_test.index == idx, "LotFrontage"] = df_nulls_test.loc[idx][ "LotFrontage" ] X_valid.isnull().any().any() # # **3. ENCODING** # Firstly, let us look at the categorical features we have: print(categorical_col) # There are many rating variables (i.e. 1-10), which will be better off in the numerical variables set. numerical_col.append(["OverallQual", "OverallCond"]) categorical_col.remove("OverallQual") categorical_col.remove("OverallCond") # Updated list of categorical variables: print(categorical_col) # Out of these, there are many ordinal variables and many nominal ones. So, appropriate encoding techniques need to be used. # **FOR ORDINAL VARIABLES:** # First off, we have to hand pick all ordinal variables. For that, examining the *data_description.txt* file would help. ordinal_variables = [ "LotShape", "Utilities", "LandSlope", "ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2", "HeatingQC", "KitchenQual", "Functional", "FireplaceQu", "GarageFinish", "GarageQual", "GarageCond", "PavedDrive", ] nominal_variables = list(set(categorical_col) - set(ordinal_variables)) from category_encoders.ordinal import OrdinalEncoder oe = OrdinalEncoder( cols=ordinal_variables, mapping=[ {"col": "LotShape", "mapping": {"IR3": 0, "IR2": 1, "IR1": 2, "Reg": 3}}, { "col": "Utilities", "mapping": {"ELO": 0, "NoSeWa": 1, "NoSeWr": 2, "AllPub": 3}, }, {"col": "LandSlope", "mapping": {"Sev": 0, "Mod": 1, "Gtl": 2}}, {"col": "ExterQual", "mapping": {"Po": 0, "Fa": 1, "TA": 2, "Gd": 3, "Ex": 4}}, {"col": "ExterCond", "mapping": {"Po": 0, "Fa": 1, "TA": 2, "Gd": 3, "Ex": 4}}, { "col": "BsmtQual", "mapping": {"missing": 0, "Po": 1, "Fa": 2, "TA": 3, "Gd": 4, "Ex": 5}, }, { "col": "BsmtCond", "mapping": {"missing": 0, "Po": 1, "Fa": 2, "TA": 3, "Gd": 4, "Ex": 5}, }, { "col": "BsmtExposure", "mapping": {"missing": 0, "No": 1, "Mn": 2, "Av": 3, "Gd": 4}, }, { "col": "BsmtFinType1", "mapping": { "missing": 0, "Unf": 1, "LwQ": 2, "Rec": 3, "BLQ": 4, "ALQ": 5, "GLQ": 6, }, }, { "col": "BsmtFinType2", "mapping": { "missing": 0, "Unf": 1, "LwQ": 2, "Rec": 3, "BLQ": 4, "ALQ": 5, "GLQ": 6, }, }, {"col": "HeatingQC", "mapping": {"Po": 0, "Fa": 1, "TA": 2, "Gd": 3, "Ex": 4}}, { "col": "KitchenQual", "mapping": {"Po": 0, "Fa": 1, "TA": 2, "Gd": 3, "Ex": 4}, }, { "col": "Functional", "mapping": { "Sal": 0, "Sev": 1, "Maj2": 2, "Maj1": 3, "Mod": 4, "Min2": 5, "Min1": 6, "Typ": 7, }, }, { "col": "FireplaceQu", "mapping": {"missing": 0, "Po": 1, "Fa": 2, "TA": 3, "Gd": 4, "Ex": 5}, }, { "col": "GarageFinish", "mapping": {"missing": 0, "Unf": 1, "Rfn": 2, "Fin": 3}, }, { "col": "GarageQual", "mapping": {"missing": 0, "Po": 1, "Fa": 2, "TA": 3, "Gd": 4, "Ex": 5}, }, { "col": "GarageCond", "mapping": {"missing": 0, "Po": 1, "Fa": 2, "TA": 3, "Gd": 4, "Ex": 5}, }, {"col": "PavedDrive", "mapping": {"N": 0, "P": 1, "Y": 2}}, ], ) X_train = oe.fit_transform(X_train) X_valid = oe.transform(X_valid) X_test = oe.transform(X_test) # print(X_train.shape) -> (1161, 70) # print(X_valid.shape) -> (290, 70) # **FOR NOMINAL VARIABLES:** For these, we will use the good ol' one hot encoding technique. X_train = pd.get_dummies(X_train) X_valid = pd.get_dummies(X_valid) X_test = pd.get_dummies(X_test) X_train, X_valid = X_train.align(X_valid, join="left", axis=1) X_train, X_test = X_train.align(X_test, join="left", axis=1) # print(X_train.shape) -> (1161, 208) # print(X_valid.shape) -> (290, 208) # # **4. 
FEATURE SCALING** # Many machine learning algorithms perform better when numerical input variables are scaled to a standard range. # Standardizing is a popular scaling technique that subtracts the mean from values and divides by the standard deviation, transforming the probability distribution for an input variable to a standard Gaussian (zero mean and unit variance). Standardization can become skewed or biased if the input variable contains outlier values. # To overcome this, the median and interquartile range can be used when standardizing numerical input variables, generally referred to as robust scaling. from sklearn.preprocessing import StandardScaler, RobustScaler scaler = RobustScaler() # To make dataset robust to outliers. robust_X_train = scaler.fit_transform(X_train) robust_X_valid = scaler.transform(X_valid) robust_X_test = scaler.transform(X_test) sc = StandardScaler() scaled_X_train = sc.fit_transform(robust_X_train) scaled_X_valid = sc.transform(robust_X_valid) scaled_X_test = sc.transform(robust_X_test) # On passing through the Scalers, our Data Frame has now been converted to a numpy array. So, for convention, we will convert the array back to a Data Frame. final_X_train = pd.DataFrame( scaled_X_train, index=X_train.index, columns=X_train.columns ) final_X_valid = pd.DataFrame( scaled_X_valid, index=X_valid.index, columns=X_valid.columns ) final_X_test = pd.DataFrame(scaled_X_test, index=X_test.index, columns=X_test.columns) final_X_train.describe() # With this, we are now done with data anaytics. # # **5. MODEL FITTING AND EVALUATION** # It goes without saying that for a dataset having 200+ features, we have to employ complex curve fitting regression techniques. Some regression techniques we are going to use are: # 1. Decision Tree Regressor # 2. Random Forest Regressor # 3. XG Boost Regressor # 4. Ada Boost Regressor # 5. K-Nearest-Neighbors Regressor # 6. Ridge Regressor from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor from xgboost import XGBRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import Ridge # For parameter selection and comparison, use RandomizedSearchCV from sklearn.model_selection import RandomizedSearchCV dt = DecisionTreeRegressor() rfr = RandomForestRegressor() ada = AdaBoostRegressor() xgb = XGBRegressor() knn = KNeighborsRegressor() rdg = Ridge() # Next, we will use *RandomizedSearchCV* to define and test different combinations of parameters. This is an important step as many a times simply hypertuning the parameters can drastically improve the model performance. 
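# ---- Aside (not in the original notebook) ----
# RandomizedSearchCV only samples a fixed number of candidate settings
# (n_iter, 10 by default) and the sampling is random, so pinning n_iter and
# random_state makes a run reproducible. An illustrative sketch using a Ridge
# model like the one above (parameter values chosen purely for demonstration);
# the actual searches used in this notebook are defined in the next cell.
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import RandomizedSearchCV

demo_search = RandomizedSearchCV(
    Ridge(),
    param_distributions={"alpha": np.arange(0.01, 1.0, 0.01)},
    n_iter=20,  # sample 20 candidate alphas instead of the default 10
    cv=5,
    random_state=42,  # reproducible sampling of candidates
)
# demo_search.fit(final_X_train, y_train)  # same fit pattern as the searches below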
from sklearn.metrics import make_scorer, mean_absolute_error, mean_squared_error
from scipy.stats import uniform, randint

# Mean Absolute Error as scorer. MAE must be minimised, so greater_is_better=False
# makes the search maximise its negative; otherwise RandomizedSearchCV would pick
# the parameters with the *largest* error.
scorer = make_scorer(mean_absolute_error, greater_is_better=False)

# # # # # # # # Decision Tree # # # # # # # #
dt_params = {
    "criterion": ["mae", "friedman_mse"],
    "splitter": ["best", "random"],
    "max_depth": np.arange(1, 50),
    "max_features": ["auto", "log2", "sqrt"],
    "min_samples_split": [2, 5, 10],
    "min_samples_leaf": [2, 3, 4, 5, 10, 15, 20],
}
dt_rs = RandomizedSearchCV(dt, param_distributions=dt_params, scoring=scorer, cv=5)

# # # # # # # # Random Forest # # # # # # # #
rfr_params = {
    "criterion": ["mse", "mae"],
    "n_estimators": [50, 100, 200, 400, 500],
    "max_depth": np.arange(1, 50),
    "max_features": ["auto", "log2", "sqrt"],
    "min_samples_split": [2, 5, 10],
    "min_samples_leaf": [2, 3, 4, 5, 10, 20, 30, 40, 50],
}
rfr_rs = RandomizedSearchCV(rfr, param_distributions=rfr_params, scoring=scorer, cv=5)

# # # # # # # # ADA Boost # # # # # # # #
ada_params = {
    "n_estimators": [50, 100, 200, 400, 500],
    "learning_rate": np.arange(0, 1, 0.01),
    "loss": ["linear", "square"],
}
ada_rs = RandomizedSearchCV(ada, param_distributions=ada_params, scoring=scorer, cv=5)

# # # # # # # # XG Boost # # # # # # # #
xgb_params = {
    "n_estimators": [50, 100, 200, 300, 400, 500],
    "learning_rate": np.arange(0, 1, 0.01),
    "min_child_weight": np.arange(1, 10),
}
xgb_rs = RandomizedSearchCV(xgb, param_distributions=xgb_params, scoring=scorer, cv=5)

# # # # # # # # K-Nearest Neighbors # # # # # # # #
knn_params = {
    "n_neighbors": np.arange(1, 50),
    "weights": ["uniform", "distance"],
    "algorithm": ["auto", "ball_tree", "kd_tree"],
}
knn_rs = RandomizedSearchCV(knn, param_distributions=knn_params, scoring=scorer, cv=5)

# # # # # # # # Ridge # # # # # # # #
rdg_params = {"alpha": np.arange(0, 1, 0.01), "solver": ["auto", "lsqr", "saga"]}
rdg_rs = RandomizedSearchCV(rdg, param_distributions=rdg_params, scoring=scorer, cv=5)

# Now that we have our models and their hyperparameter combinations defined, let's fit each model on the training set and evaluate their scores.
# # # # # # # # FITTING # # # # # # # #
dt_rs.fit(final_X_train, y_train)
rfr_rs.fit(final_X_train, y_train)
ada_rs.fit(final_X_train, y_train)
xgb_rs.fit(final_X_train, y_train)
knn_rs.fit(final_X_train, y_train)
rdg_rs.fit(final_X_train, y_train)

# # # # # # # # EVALUATION # # # # # # # #
print("Decision Tree best parameters:", dt_rs.best_params_)
print("Random Forest best parameters:", rfr_rs.best_params_)
print("Ada Boost best parameters:", ada_rs.best_params_)
print("XGB best parameters:", xgb_rs.best_params_)
print("KNN best parameters:", knn_rs.best_params_)
print("Ridge best parameters:", rdg_rs.best_params_)

# This gives us the best set of parameters corresponding to each model.
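# ---- Aside (not in the original notebook) ----
# Assuming the searches above kept the default refit=True, each RandomizedSearchCV
# object already holds a copy of its winning model refitted on the full training
# data, so an alternative to re-creating every estimator by hand from best_params_
# (as done in the next cell) is to reuse best_estimator_ directly:
tuned_models = [
    (dt_rs.best_estimator_, "Decision Tree"),
    (rfr_rs.best_estimator_, "Random Forest"),
    (ada_rs.best_estimator_, "Ada Boost"),
    (xgb_rs.best_estimator_, "XG Boost"),
    (knn_rs.best_estimator_, "K Neighbors"),
    (rdg_rs.best_estimator_, "Ridge"),
]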
# Next, we apply the optimized parameters to the respective models:
# # # # # # # # Decision Tree # # # # # # # #
dt = DecisionTreeRegressor(
    criterion=dt_rs.best_params_["criterion"],
    splitter=dt_rs.best_params_["splitter"],
    max_depth=dt_rs.best_params_["max_depth"],
    max_features=dt_rs.best_params_["max_features"],
    min_samples_split=dt_rs.best_params_["min_samples_split"],
    min_samples_leaf=dt_rs.best_params_["min_samples_leaf"],
    random_state=42,
)

# # # # # # # # Random Forest # # # # # # # #
rfr = RandomForestRegressor(
    criterion=rfr_rs.best_params_["criterion"],
    n_estimators=rfr_rs.best_params_["n_estimators"],
    max_depth=rfr_rs.best_params_["max_depth"],
    max_features=rfr_rs.best_params_["max_features"],  # use the tuned max_features, not max_depth
    min_samples_split=rfr_rs.best_params_["min_samples_split"],
    min_samples_leaf=rfr_rs.best_params_["min_samples_leaf"],
    random_state=42,
)

# # # # # # # # ADA Boost # # # # # # # #
ada = AdaBoostRegressor(
    n_estimators=ada_rs.best_params_["n_estimators"],
    learning_rate=ada_rs.best_params_["learning_rate"],
    loss=ada_rs.best_params_["loss"],
    random_state=42,
)

# # # # # # # # XG Boost # # # # # # # #
xgb = XGBRegressor(
    n_estimators=xgb_rs.best_params_["n_estimators"],
    learning_rate=xgb_rs.best_params_["learning_rate"],
    min_child_weight=xgb_rs.best_params_["min_child_weight"],
    random_state=42,
    eval_metric="rmse",  # regression metric; logloss is a classification metric
)

# # # # # # # # K-Nearest Neighbors # # # # # # # #
knn = KNeighborsRegressor(
    n_neighbors=knn_rs.best_params_["n_neighbors"],
    weights=knn_rs.best_params_["weights"],
    algorithm=knn_rs.best_params_["algorithm"],
)

# # # # # # # # Ridge # # # # # # # #
rdg = Ridge(alpha=rdg_rs.best_params_["alpha"], solver=rdg_rs.best_params_["solver"])

models = [
    (dt, "Decision Tree"),
    (rfr, "Random Forest"),
    (ada, "Ada Boost"),
    (xgb, "XG Boost"),
    (knn, "K Neighbors"),
    (rdg, "Ridge"),
]

# After tuning each model with its best parameters, we will now measure how well these models do with respect to a few scoring criteria.
final_X_valid = final_X_valid.fillna(0)
final_X_test = final_X_test.fillna(0)

# dataframe to keep track of scores of various models
evaluations = pd.DataFrame({"Model": [], "MAE": [], "MSE": [], "RMSE": []})


# function that evaluates and returns different scores obtained by a model
def evaluate(actual, preds):
    mae = mean_absolute_error(actual, preds)
    mse = mean_squared_error(actual, preds)
    rmse = mean_squared_error(actual, preds, squared=False)
    return (mae, mse, rmse)


# Fitting and evaluating the models one by one
for model, model_name in models:
    model.fit(final_X_train, y_train)
    preds = model.predict(final_X_valid)
    mae, mse, rmse = evaluate(y_valid, preds)
    cur_model = {"Model": model_name, "MAE": mae, "MSE": mse, "RMSE": rmse}
    evaluations = evaluations.append(cur_model, ignore_index=True)

evaluations.set_index("Model", inplace=True)
evaluations

# Out of all the models, *Ridge* has shown the best results. Thus, we'll base our final test predictions on the rdg model.
# # **6. PREDICTION**
# With everything prepared, we now just have to predict the target value on the test set with the model fitted on the training set.
final_X_test.shape

# make predictions
test_predictions = rdg.predict(final_X_test)

# convert to csv
output = pd.DataFrame({"Id": final_X_test.index, "SalePrice": test_predictions})
output.to_csv("submission.csv", index=False)
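# ---- Aside (not in the original notebook) ----
# Optional sanity check before uploading: re-read the file just written and
# confirm it has the columns and row count the competition expects.
check = pd.read_csv("submission.csv")
assert list(check.columns) == ["Id", "SalePrice"], "unexpected columns"
assert len(check) == len(final_X_test), "row count mismatch with the test set"
assert check["SalePrice"].notnull().all(), "missing predictions"
check.head()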
[{"Id": 69235929, "ScriptId": 18878890, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7640936, "CreationDate": "07/28/2021 12:54:11", "VersionNumber": 2.0, "Title": "Housing Prices", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 612.0, "LinesInsertedFromPrevious": 15.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 597.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
69235129
# # COMPREHENSIVE DATA EXPLORATION WITH PYTHON # [Pedro Marcelino](http://pmarcelino.com) - February 2017 # Other Kernels: [Data analysis and feature extraction with Python # ](https://www.kaggle.com/pmarcelino/data-analysis-and-feature-extraction-with-python) # ---------- # 'The most difficult thing in life is to know yourself' # This quote belongs to Thales of Miletus. Thales was a Greek/Phonecian philosopher, mathematician and astronomer, which is recognised as the first individual in Western civilisation known to have entertained and engaged in scientific thought (source: https://en.wikipedia.org/wiki/Thales) # I wouldn't say that knowing your data is the most difficult thing in data science, but it is time-consuming. Therefore, it's easy to overlook this initial step and jump too soon into the water. # So I tried to learn how to swim before jumping into the water. Based on [Hair et al. (2013)](https://amzn.to/2JuDmvo), chapter 'Examining your data', I did my best to follow a comprehensive, but not exhaustive, analysis of the data. I'm far from reporting a rigorous study in this kernel, but I hope that it can be useful for the community, so I'm sharing how I applied some of those data analysis principles to this problem. # Despite the strange names I gave to the chapters, what we are doing in this kernel is something like: # 1. Understand the problem. We'll look at each variable and do a philosophical analysis about their meaning and importance for this problem. # 2. Univariable study. We'll just focus on the dependent variable ('SalePrice') and try to know a little bit more about it. # 3. Multivariate study. We'll try to understand how the dependent variable and independent variables relate. # 4. Basic cleaning. We'll clean the dataset and handle the missing data, outliers and categorical variables. # 5. Test assumptions. We'll check if our data meets the assumptions required by most multivariate techniques. # Now, it's time to have fun! # invite people for the Kaggle party import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np from scipy.stats import norm from sklearn.preprocessing import StandardScaler from scipy import stats import warnings warnings.filterwarnings("ignore") # bring in the six packs df_train = pd.read_csv("../input/train.csv") df_train.head() # check the decoration df_train.columns # # 1. So... What can we expect? # In order to understand our data, we can look at each variable and try to understand their meaning and relevance to this problem. I know this is time-consuming, but it will give us the flavour of our dataset. # In order to have some discipline in our analysis, we can create an Excel spreadsheet with the following columns: # * Variable - Variable name. # * Type - Identification of the variables' type. There are two possible values for this field: 'numerical' or 'categorical'. By 'numerical' we mean variables for which the values are numbers, and by 'categorical' we mean variables for which the values are categories. # * Segment - Identification of the variables' segment. We can define three possible segments: building, space or location. When we say 'building', we mean a variable that relates to the physical characteristics of the building (e.g. 'OverallQual'). When we say 'space', we mean a variable that reports space properties of the house (e.g. 'TotalBsmtSF'). Finally, when we say a 'location', we mean a variable that gives information about the place where the house is located (e.g. 'Neighborhood'). 
# * Expectation - Our expectation about the variable's influence on 'SalePrice'. We can use a categorical scale with 'High', 'Medium' and 'Low' as possible values.
# * Conclusion - Our conclusions about the importance of the variable, after we give a quick look at the data. We can keep with the same categorical scale as in 'Expectation'.
# * Comments - Any general comments that occurred to us.
# While 'Type' and 'Segment' are just for possible future reference, the column 'Expectation' is important because it will help us develop a 'sixth sense'. To fill this column, we should read the description of all the variables and, one by one, ask ourselves:
# * Do we think about this variable when we are buying a house? (e.g. When we think about the house of our dreams, do we care about its 'Masonry veneer type'?).
# * If so, how important would this variable be? (e.g. What is the impact of having 'Excellent' material on the exterior instead of 'Poor'? And of having 'Excellent' instead of 'Good'?).
# * Is this information already described in any other variable? (e.g. If 'LandContour' gives the flatness of the property, do we really need to know the 'LandSlope'?).
# After this daunting exercise, we can filter the spreadsheet and look carefully at the variables with 'High' 'Expectation'. Then, we can rush into some scatter plots between those variables and 'SalePrice', filling in the 'Conclusion' column, which is just the correction of our expectations.
# I went through this process and concluded that the following variables can play an important role in this problem:
# * OverallQual (which is a variable that I don't like because I don't know how it was computed; a funny exercise would be to predict 'OverallQual' using all the other variables available).
# * YearBuilt.
# * TotalBsmtSF.
# * GrLivArea.
# I ended up with two 'building' variables ('OverallQual' and 'YearBuilt') and two 'space' variables ('TotalBsmtSF' and 'GrLivArea'). This might be a little bit unexpected as it goes against the real estate mantra that all that matters is 'location, location and location'. It is possible that this quick data examination process was a bit harsh for categorical variables. For example, I expected the 'Neighborhood' variable to be more relevant, but after the data examination I ended up excluding it. Maybe this is related to the use of scatter plots instead of boxplots, which are more suitable for categorical variable visualization. The way we visualize data often influences our conclusions.
# However, the main point of this exercise was to think a little about our data and expectations, so I think we achieved our goal. Now it's time for 'a little less conversation, a little more action please'. Let's shake it!

# # 2. First things first: analysing 'SalePrice'
# 'SalePrice' is the reason for our quest. It's like when we're going to a party. We always have a reason to be there. Usually, women are that reason. (disclaimer: adapt it to men, dancing or alcohol, according to your preferences)
# Using the women analogy, let's build a little story, the story of 'How we met 'SalePrice''.
# *Everything started in our Kaggle party, when we were looking for a dance partner. After a while searching on the dance floor, we saw a girl, near the bar, wearing dance shoes. That's a sign that she's there to dance. We spend much time doing predictive modelling and participating in analytics competitions, so talking with girls is not one of our super powers. Even so, we gave it a try:*
# *'Hi, I'm Kaggly! And you? 'SalePrice'?
# What a beautiful name! You know 'SalePrice', could you give me some data about you? I just developed a model to calculate the probability of a successful relationship between two people. I'd like to apply it to us!'*

# descriptive statistics summary
df_train["SalePrice"].describe()
# print(df_train['SalePrice'])

# *'Very well... It seems that your minimum price is larger than zero. Excellent! You don't have one of those personal traits that would destroy my model! Do you have any picture that you can send me? I don't know... like, you at the beach... or maybe a selfie in the gym?'*

# histogram
sns.distplot(df_train["SalePrice"])

# *'Ah! I see that you use seaborn makeup when you're going out... That's so elegant! I also see that you:*
# * *Deviate from the normal distribution.*
# * *Have appreciable positive skewness.*
# * *Show peakedness.*
# *This is getting interesting! 'SalePrice', could you give me your body measures?'*

# skewness and kurtosis
print("Skewness: %f" % df_train["SalePrice"].skew())
print("Kurtosis: %f" % df_train["SalePrice"].kurt())

# # 'SalePrice', her buddies and her interests
# ### Relationship with numerical variables

# scatter plot grlivarea/saleprice
var = "GrLivArea"
data = pd.concat([df_train["SalePrice"], df_train[var]], axis=1)
data.plot.scatter(x=var, y="SalePrice", ylim=(0, 800000))
print(data[:5])

# *Hmmm... It seems that 'SalePrice' and 'GrLivArea' are really old friends, with a linear relationship.*
# *And what about 'TotalBsmtSF'?*

# scatter plot totalbsmtsf/saleprice
var = "TotalBsmtSF"
data = pd.concat([df_train["SalePrice"], df_train[var]], axis=1)
data.plot.scatter(x=var, y="SalePrice", ylim=(0, 800000))
print(data[:5])

# *'TotalBsmtSF' is also a great friend of 'SalePrice' but this seems a much more emotional relationship! Everything is ok and suddenly, in a strong linear (exponential?) reaction, everything changes. Moreover, it's clear that sometimes 'TotalBsmtSF' closes in on itself and gives zero credit to 'SalePrice'.*

# ### Relationship with categorical features

# box plot overallqual/saleprice
var = "OverallQual"
data = pd.concat([df_train["SalePrice"], df_train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000)

# *Like all the pretty girls, 'SalePrice' enjoys 'OverallQual'. Note to self: consider whether McDonald's is suitable for the first date.*

var = "YearBuilt"
data = pd.concat([df_train["SalePrice"], df_train[var]], axis=1)
f, ax = plt.subplots(figsize=(16, 8))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000)
plt.xticks(rotation=90)

# *Although it's not a strong tendency, I'd say that 'SalePrice' is more prone to spending more money on new stuff than on old relics.*
# Note: we don't know if 'SalePrice' is in constant prices. Constant prices try to remove the effect of inflation. If 'SalePrice' is not in constant prices, it should be, so that prices are comparable over the years.

# ### In summary
# Stories aside, we can conclude that:
# * 'GrLivArea' and 'TotalBsmtSF' seem to be linearly related with 'SalePrice'. Both relationships are positive, which means that as one variable increases, the other also increases. In the case of 'TotalBsmtSF', we can see that the slope of the linear relationship is particularly high.
# * 'OverallQual' and 'YearBuilt' also seem to be related with 'SalePrice'.
# The relationship seems to be stronger in the case of 'OverallQual', where the box plot shows how sale prices increase with the overall quality.
# We just analysed four variables, but there are many others that we should analyse. The trick here seems to be the choice of the right features (feature selection) and not the definition of complex relationships between them (feature engineering).
# That said, let's separate the wheat from the chaff.

# #### Correlation matrix (heatmap style)

# correlation matrix
corrmat = df_train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=0.8, square=True)

# In my opinion, this heatmap is the best way to get a quick overview of our 'plasma soup' and its relationships. (Thank you @seaborn!)
# At first sight, there are two red colored squares that get my attention. The first one refers to the 'TotalBsmtSF' and '1stFlrSF' variables, and the second one refers to the 'Garage*X*' variables. Both cases show how significant the correlation is between these variables. Actually, this correlation is so strong that it can indicate a situation of multicollinearity. If we think about these variables, we can conclude that they give almost the same information, so multicollinearity really occurs. Heatmaps are great for detecting this kind of situation and, in problems dominated by feature selection, like ours, they are an essential tool.
# Another thing that got my attention was the 'SalePrice' correlations. We can see our well-known 'GrLivArea', 'TotalBsmtSF', and 'OverallQual' saying a big 'Hi!', but we can also see many other variables that should be taken into account. That's what we will do next.

# #### 'SalePrice' correlation matrix (zoomed heatmap style)

# saleprice correlation matrix
k = 10  # number of variables for heatmap
cols = corrmat.nlargest(k, "SalePrice")["SalePrice"].index
cm = np.corrcoef(df_train[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(
    cm,
    cbar=True,
    annot=True,
    square=True,
    fmt=".2f",
    annot_kws={"size": 10},
    yticklabels=cols.values,
    xticklabels=cols.values,
)
plt.show()

# According to our crystal ball, these are the variables most correlated with 'SalePrice'. My thoughts on this:
# * 'OverallQual', 'GrLivArea' and 'TotalBsmtSF' are strongly correlated with 'SalePrice'. Check!
# * 'GarageCars' and 'GarageArea' are also some of the most strongly correlated variables. However, as we discussed in the last sub-point, the number of cars that fit into the garage is a consequence of the garage area. 'GarageCars' and 'GarageArea' are like twin brothers. You'll never be able to distinguish them. Therefore, we just need one of these variables in our analysis (we can keep 'GarageCars' since its correlation with 'SalePrice' is higher).
# * 'TotalBsmtSF' and '1stFloor' also seem to be twin brothers. We can keep 'TotalBsmtSF' just to say that our first guess was right (re-read 'So... What can we expect?').
# * 'FullBath'?? Really?
# * 'TotRmsAbvGrd' and 'GrLivArea', twin brothers again. Is this dataset from Chernobyl?
# * Ah... 'YearBuilt'... It seems that 'YearBuilt' is slightly correlated with 'SalePrice'. Honestly, it scares me to think about 'YearBuilt' because I start feeling that we should do a little bit of time-series analysis to get this right. I'll leave this as homework for you.
# Let's proceed to the scatter plots.

# #### Scatter plots between 'SalePrice' and correlated variables (move like Jagger style)
# Get ready for what you're about to see.
# I must confess that the first time I saw these scatter plots I was totally blown away! So much information in such a short space... It's just amazing. Once more, thank you @seaborn! You make me 'move like Jagger'!

# scatterplot
sns.set()
cols = [
    "SalePrice",
    "OverallQual",
    "GrLivArea",
    "GarageCars",
    "TotalBsmtSF",
    "FullBath",
    "YearBuilt",
]
sns.pairplot(df_train[cols], size=2.5)
plt.show()

# Although we already know some of the main figures, this mega scatter plot gives us a reasonable idea about variable relationships.
# One of the figures we may find interesting is the one between 'TotalBsmtSF' and 'GrLivArea'. In this figure we can see the dots drawing a straight line, which almost acts like a border. It totally makes sense that the majority of the dots stay below that line. Basement areas can be equal to the above-ground living area, but we do not expect a basement area bigger than the above-ground living area (unless you're trying to buy a bunker).
# The plot concerning 'SalePrice' and 'YearBuilt' can also make us think. In the bottom of the 'dots cloud', we see what almost appears to be a shy exponential function (be creative). We can also see this same tendency in the upper limit of the 'dots cloud' (be even more creative). Also, notice how the set of dots regarding the last years tends to stay above this limit (I just wanted to say that prices are increasing faster now).
# Ok, enough of the Rorschach test for now. Let's move forward to what's missing: missing data!

# # 4. Missing data
# Important questions when thinking about missing data:
# * How prevalent is the missing data?
# * Is missing data random or does it have a pattern?
# The answer to these questions is important for practical reasons because missing data can imply a reduction of the sample size. This can prevent us from proceeding with the analysis. Moreover, from a substantive perspective, we need to ensure that the missing data process is not biased and hiding an inconvenient truth.

# missing data
total = df_train.isnull().sum().sort_values(ascending=False)
percent = (df_train.isnull().sum() / df_train.isnull().count()).sort_values(
    ascending=False
)
missing_data = pd.concat([total, percent], axis=1, keys=["Total", "Percent"])
missing_data.head(20)

# Let's analyse this to understand how to handle the missing data.
# We'll consider that when more than 15% of the data is missing, we should delete the corresponding variable and pretend it never existed. This means that we will not try any trick to fill the missing data in these cases. According to this, there is a set of variables (e.g. 'PoolQC', 'MiscFeature', 'Alley', etc.) that we should delete. The point is: will we miss this data? I don't think so. None of these variables seem to be very important, since most of them are not aspects we think about when buying a house (maybe that's the reason why data is missing?). Moreover, looking closer at the variables, we could say that variables like 'PoolQC', 'MiscFeature' and 'FireplaceQu' are strong candidates for outliers, so we'll be happy to delete them.
# As for the remaining cases, we can see that the 'Garage*X*' variables have the same number of missing values. I bet the missing data refers to the same set of observations (although I will not check it; it's just 5% and we should not spend $20 on $5 problems). Since the most important information regarding garages is expressed by 'GarageCars' and considering that we are just talking about 5% of missing data, I'll delete the mentioned 'Garage*X*' variables.
# The same logic applies to the 'Bsmt*X*' variables.
# Regarding 'MasVnrArea' and 'MasVnrType', we can consider that these variables are not essential. Furthermore, they have a strong correlation with 'YearBuilt' and 'OverallQual', which are already considered. Thus, we will not lose information if we delete 'MasVnrArea' and 'MasVnrType'.
# Finally, we have one missing observation in 'Electrical'. Since it is just one observation, we'll delete this observation and keep the variable.
# In summary, to handle missing data, we'll delete all the variables with missing data, except the variable 'Electrical'. In 'Electrical' we'll just delete the observation with missing data.

# dealing with missing data
df_train = df_train.drop(missing_data[missing_data["Total"] > 1].index, axis=1)
df_train = df_train.drop(df_train.loc[df_train["Electrical"].isnull()].index)
df_train.isnull().sum().max()  # just checking that there's no missing data missing...

# # Out liars!
# Outliers are also something that we should be aware of. Why? Because outliers can markedly affect our models and can be a valuable source of information, providing us with insights about specific behaviours.
# Outliers are a complex subject and deserve more attention. Here, we'll just do a quick analysis through the standard deviation of 'SalePrice' and a set of scatter plots.

# ### Univariate analysis
# The primary concern here is to establish a threshold that defines an observation as an outlier. To do so, we'll standardize the data. In this context, data standardization means converting data values to have a mean of 0 and a standard deviation of 1.

# standardizing data
saleprice_scaled = StandardScaler().fit_transform(df_train["SalePrice"].values[:, np.newaxis])
low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]
high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]
print("outer range (low) of the distribution:")
print(low_range)
print("\nouter range (high) of the distribution:")
print(high_range)

# How 'SalePrice' looks with her new clothes:
# * Low range values are similar and not too far from 0.
# * High range values are far from 0 and the 7.something values are really out of range.
# For now, we'll not consider any of these values as an outlier, but we should be careful with those two 7.something values.

# ### Bivariate analysis
# We already know the following scatter plots by heart. However, when we look at things from a new perspective, there's always something to discover. As Alan Kay said, 'a change in perspective is worth 80 IQ points'.

# bivariate analysis saleprice/grlivarea
var = "GrLivArea"
data = pd.concat([df_train["SalePrice"], df_train[var]], axis=1)
data.plot.scatter(x=var, y="SalePrice", ylim=(0, 800000))

# What has been revealed:
# * The two values with the biggest 'GrLivArea' seem strange and they are not following the crowd. We can speculate why this is happening. Maybe they refer to agricultural areas and that could explain the low price. I'm not sure about this, but I'm quite confident that these two points are not representative of the typical case. Therefore, we'll define them as outliers and delete them.
# * The two observations at the top of the plot are those 7.something observations that we said we should be careful about. They look like two special cases; however, they seem to be following the trend. For that reason, we will keep them.
# deleting points
df_train.sort_values(by="GrLivArea", ascending=False)[:2]
df_train = df_train.drop(df_train[df_train["Id"] == 1299].index)
df_train = df_train.drop(df_train[df_train["Id"] == 524].index)

# bivariate analysis saleprice/totalbsmtsf
var = "TotalBsmtSF"
data = pd.concat([df_train["SalePrice"], df_train[var]], axis=1)
data.plot.scatter(x=var, y="SalePrice", ylim=(0, 800000))

# We can feel tempted to eliminate some observations (e.g. TotalBsmtSF > 3000) but I suppose it's not worth it. We can live with that, so we'll not do anything.

# # 5. Getting hard core
# In Ayn Rand's novel, 'Atlas Shrugged', there is an often-repeated question: who is John Galt? A big part of the book is about the quest to discover the answer to this question.
# I feel Randian now. Who is 'SalePrice'?
# The answer to this question lies in testing for the assumptions underlying the statistical bases for multivariate analysis. We already did some data cleaning and discovered a lot about 'SalePrice'. Now it's time to go deep and understand how 'SalePrice' complies with the statistical assumptions that enable us to apply multivariate techniques.
# According to [Hair et al. (2013)](https://amzn.to/2uC3j9p), four assumptions should be tested:
# * Normality - When we talk about normality, what we mean is that the data should look like a normal distribution. This is important because several statistical tests rely on this (e.g. t-statistics). In this exercise we'll just check univariate normality for 'SalePrice' (which is a limited approach). Remember that univariate normality doesn't ensure multivariate normality (which is what we would like to have), but it helps. Another detail to take into account is that in big samples (>200 observations) normality is not such an issue. However, if we solve normality, we avoid a lot of other problems (e.g. heteroscedasticity), so that's the main reason why we are doing this analysis.
# * Homoscedasticity - I just hope I wrote it right. Homoscedasticity refers to the 'assumption that dependent variable(s) exhibit equal levels of variance across the range of predictor variable(s)' [(Hair et al., 2013)](https://amzn.to/2uC3j9p). Homoscedasticity is desirable because we want the error term to be the same across all values of the independent variables.
# * Linearity - The most common way to assess linearity is to examine scatter plots and search for linear patterns. If patterns are not linear, it would be worthwhile to explore data transformations. However, we'll not get into this because most of the scatter plots we've seen appear to have linear relationships.
# * Absence of correlated errors - Correlated errors, like the definition suggests, happen when one error is correlated to another. For instance, if a positive error systematically produces a negative error, it means that there's a relationship between these variables. This occurs often in time series, where some patterns are time related. We'll also not get into this. However, if you detect something, try to add a variable that can explain the effect you're getting. That's the most common solution for correlated errors.
# What do you think Elvis would say about this long explanation? 'A little less conversation, a little more action please'? Probably... By the way, do you know what Elvis's last great hit was?
# (...)
# The bathroom floor.

# ### In the search for normality
# The point here is to test 'SalePrice' in a very lean way. We'll do this paying attention to:
# * Histogram - Kurtosis and skewness.
# * Normal probability plot - Data distribution should closely follow the diagonal that represents the normal distribution.

# histogram and normal probability plot
sns.distplot(df_train["SalePrice"], fit=norm)
fig = plt.figure()
res = stats.probplot(df_train["SalePrice"], plot=plt)

# Ok, 'SalePrice' is not normal. It shows 'peakedness', positive skewness and does not follow the diagonal line.
# But everything's not lost. A simple data transformation can solve the problem. This is one of the awesome things you can learn in statistics books: in case of positive skewness, log transformations usually work well. When I discovered this, I felt like a Hogwarts student discovering a new cool spell.
# *Avada kedavra!*

# applying log transformation
df_train["SalePrice"] = np.log(df_train["SalePrice"])

# transformed histogram and normal probability plot
sns.distplot(df_train["SalePrice"], fit=norm)
fig = plt.figure()
res = stats.probplot(df_train["SalePrice"], plot=plt)

# Done! Let's check what's going on with 'GrLivArea'.

# histogram and normal probability plot
sns.distplot(df_train["GrLivArea"], fit=norm)
fig = plt.figure()
res = stats.probplot(df_train["GrLivArea"], plot=plt)

# Tastes like skewness... *Avada kedavra!*

# data transformation
df_train["GrLivArea"] = np.log(df_train["GrLivArea"])

# transformed histogram and normal probability plot
sns.distplot(df_train["GrLivArea"], fit=norm)
fig = plt.figure()
res = stats.probplot(df_train["GrLivArea"], plot=plt)

# Next, please...

# histogram and normal probability plot
sns.distplot(df_train["TotalBsmtSF"], fit=norm)
fig = plt.figure()
res = stats.probplot(df_train["TotalBsmtSF"], plot=plt)

# Ok, now we are dealing with the big boss. What do we have here?
# * Something that, in general, presents skewness.
# * A significant number of observations with value zero (houses without basement).
# * A big problem because the value zero doesn't allow us to do log transformations.
# To apply a log transformation here, we'll create a variable that can capture the effect of having or not having a basement (binary variable). Then, we'll do a log transformation on all the non-zero observations, ignoring those with value zero. This way we can transform the data without losing the effect of having or not having a basement.
# I'm not sure if this approach is correct. It just seemed right to me. That's what I call 'high risk engineering'.

# create column for new variable (one is enough because it's a binary categorical feature)
# if area>0 it gets 1, for area==0 it gets 0
df_train["HasBsmt"] = pd.Series(len(df_train["TotalBsmtSF"]), index=df_train.index)
df_train["HasBsmt"] = 0
df_train.loc[df_train["TotalBsmtSF"] > 0, "HasBsmt"] = 1

# transform data
df_train.loc[df_train["HasBsmt"] == 1, "TotalBsmtSF"] = np.log(df_train["TotalBsmtSF"])

# histogram and normal probability plot
sns.distplot(df_train[df_train["TotalBsmtSF"] > 0]["TotalBsmtSF"], fit=norm)
fig = plt.figure()
res = stats.probplot(df_train[df_train["TotalBsmtSF"] > 0]["TotalBsmtSF"], plot=plt)

# ### In the search for writing 'homoscedasticity' right at the first attempt
# The best approach to test homoscedasticity for two metric variables is a graphical one. Departures from an equal dispersion are shown by such shapes as cones (small dispersion at one side of the graph, large dispersion at the opposite side) or diamonds (a large number of points at the center of the distribution).
# Starting with 'SalePrice' and 'GrLivArea'...
# scatter plot
plt.scatter(df_train["GrLivArea"], df_train["SalePrice"])

# Older versions of this scatter plot (prior to the log transformations) had a conic shape (go back and check 'Scatter plots between 'SalePrice' and correlated variables (move like Jagger style)'). As you can see, the current scatter plot doesn't have a conic shape anymore. That's the power of normality! Just by ensuring normality in some variables, we solved the homoscedasticity problem.
# Now let's check 'SalePrice' with 'TotalBsmtSF'.

# scatter plot
plt.scatter(
    df_train[df_train["TotalBsmtSF"] > 0]["TotalBsmtSF"],
    df_train[df_train["TotalBsmtSF"] > 0]["SalePrice"],
)

# We can say that, in general, 'SalePrice' exhibits equal levels of variance across the range of 'TotalBsmtSF'. Cool!

# # Last but not least, dummy variables
# Easy mode.

# convert categorical variables into dummies
df_train = pd.get_dummies(df_train)
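# Hedged aside (not part of the original kernel): np.log1p, i.e. log(1 + x), is a common
# alternative for log-transforming a skewed variable that contains zeros, so it sidesteps the
# 'HasBsmt' workaround used above for 'TotalBsmtSF'. It is shown on a fresh copy of the raw
# data because df_train has already been transformed and dummified at this point, and the
# column name 'TotalBsmtSF_log1p' is just an illustrative choice.
raw = pd.read_csv("../input/train.csv")
raw["TotalBsmtSF_log1p"] = np.log1p(raw["TotalBsmtSF"])  # zeros stay at 0, positive areas become log(1 + area)
sns.distplot(raw.loc[raw["TotalBsmtSF"] > 0, "TotalBsmtSF_log1p"], fit=norm)
fig = plt.figure()
res = stats.probplot(raw.loc[raw["TotalBsmtSF"] > 0, "TotalBsmtSF_log1p"], plot=plt)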
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/235/69235129.ipynb
null
null
[{"Id": 69235129, "ScriptId": 18875910, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5076529, "CreationDate": "07/28/2021 12:43:31", "VersionNumber": 1.0, "Title": "Comprehensive data exploration with Python", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 437.0, "LinesInsertedFromPrevious": 5.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 432.0, "LinesInsertedFromFork": 5.0, "LinesDeletedFromFork": 29.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 432.0, "TotalVotes": 1}]
null
null
null
null
false
0
7,720
1
7,720
7,720
69235195
<jupyter_start><jupyter_text>QMNIST - The Extended MNIST Dataset (120k images)
### Context
The exact preprocessing steps used to construct the MNIST dataset have long been lost. This leaves us with no reliable way to associate its characters with the ID of the writer and little hope to recover the full MNIST testing set that had 60K images but was never released. The official MNIST testing set only contains 10K randomly sampled images and is often considered too small to provide meaningful confidence intervals.

The QMNIST dataset was generated from the original data found in the NIST Special Database 19 with the goal to match the MNIST preprocessing as closely as possible.

### Content
The simplest way to use the QMNIST extended dataset is to download the unique file below (MNIST-120k). This pickle file has the same format as the standard MNIST data files but contains 120000 examples.

You can use the following lines of code to load the data:
```
def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        dict = pickle.load(fo, encoding='bytes')
    return dict
```
`qmnist = unpickle("MNIST-120k")`

The data comes in a dictionary format; you can get the data and the labels separately by extracting the content from the dictionary:
```
data = qmnist['data']
labels = qmnist['labels']
```

### Source
The original QMNIST dataset was uploaded by Chhavi Yadav and Léon Bottou. Citation:
> Yadav, C. and Bottou, L., "Cold Case: The Lost MNIST Digits", arXiv e-prints, 2019.

Link to the original paper: [https://arxiv.org/pdf/1905.10498.pdf](https://arxiv.org/pdf/1905.10498.pdf)
Link to the GitHub repository: [https://github.com/facebookresearch/qmnist](https://github.com/facebookresearch/qmnist)

My contribution was to collect all the images and labels into the same file and convert it into a pickle file so it is easier to load. Please consider mentioning the author if you use this dataset instead of the original version.
Kaggle dataset identifier: qmnist-the-extended-mnist-dataset-120k-images
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session


def unpickle(file):
    import pickle

    with open(file, "rb") as fo:
        dict = pickle.load(fo, encoding="bytes")
    return dict


from pyTsetlinMachineParallel.tm import MultiClassTsetlinMachine
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from time import time
from tqdm import tqdm
from keras.datasets import mnist

epochs = 200

# (X_train, Y_train), (X_test, Y_test) = mnist.load_data()
qmnist = unpickle(
    "/kaggle/input/qmnist-the-extended-mnist-dataset-120k-images/MNIST-120k"
)
data = qmnist["data"]
labels = qmnist["labels"]

X_train, X_val, Y_train, Y_val = train_test_split(
    data, labels, test_size=0.05, random_state=31
)
X_train = np.where(X_train.reshape((X_train.shape[0], 28 * 28)) > 75, 1, 0)
X_val = np.where(X_val.reshape((X_val.shape[0], 28 * 28)) > 75, 1, 0)
Y_train = Y_train.flatten()
Y_val = Y_val.flatten()

tm = MultiClassTsetlinMachine(2000, 50, 10.0)
acc_test = []
print("\nAccuracy over {} epochs:\n".format(epochs))
for i in tqdm(range(epochs)):
    start_training = time()
    tm.fit(X_train, Y_train, epochs=1, incremental=True)
    stop_training = time()
    start_testing = time()
    result = 100 * (tm.predict(X_val) == Y_val).mean()
    stop_testing = time()
    # print("#%d Accuracy: %.2f%% Training: %.2fs Testing: %.2fs" % (i+1, result, stop_training-start_training, stop_testing-start_testing))
    acc_test.append(result)

plt.figure(figsize=(10, 5))
plt.title("Accuracy on validation set")
plt.plot(acc_test, label="validation")
plt.legend()
plt.show()

X_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
X_test = np.where(X_test.to_numpy() > 75, 1, 0)
Y_test = tm.predict(X_test)
d = {"ImageId": [i for i in range(1, 28001)], "Label": Y_test}
df = pd.DataFrame(data=d)
df.to_csv("submission.csv", index=False)
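# Hedged aside (not part of the original script): a quick summary of the incremental run and a
# sanity check of the submission frame before uploading; it only uses objects defined above.
best_epoch = int(np.argmax(acc_test)) + 1
print("Best validation accuracy: %.2f%% at epoch %d (final epoch: %.2f%%)" % (max(acc_test), best_epoch, acc_test[-1]))
assert df.shape == (28000, 2) and list(df.columns) == ["ImageId", "Label"]
print(df.head())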
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/235/69235195.ipynb
qmnist-the-extended-mnist-dataset-120k-images
fedesoriano
[{"Id": 69235195, "ScriptId": 17471537, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6402661, "CreationDate": "07/28/2021 12:44:26", "VersionNumber": 8.0, "Title": "MNIST Top Solution", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 87.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 85.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92156739, "KernelVersionId": 69235195, "SourceDatasetVersionId": 2458532}]
[{"Id": 2458532, "DatasetId": 1488071, "DatasourceVersionId": 2500942, "CreatorUserId": 6402661, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "07/24/2021 15:31:01", "VersionNumber": 3.0, "Title": "QMNIST - The Extended MNIST Dataset (120k images)", "Slug": "qmnist-the-extended-mnist-dataset-120k-images", "Subtitle": "Improve the computer vision performance with the expanded version of MNIST data", "Description": "### Context\n\nThe exact preprocessing steps used to construct the MNIST dataset have long been lost. This leaves us with no reliable way to associate its characters with the ID of the writer and little hope to recover the full MNIST testing set that had 60K images but was never released. The official MNIST testing set only contains 10K randomly sampled images and is often considered too small to provide meaningful confidence intervals.\n\nThe QMNIST dataset was generated from the original data found in the NIST Special Database 19 with the goal to match the MNIST preprocessing as closely as possible.\n\n\n### Content\n\nThe simplest way to use the QMNIST extended dataset is to download the unique file below (MNIST-120k). This pickle file has the same format as the standard MNIST data files but contains 120000 examples.\n\nYou can use the following lines of code to load the data:\n```\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n```\n`qmnist = unpickle(\"MNIST-120k\")`\n\nThe data comes in a dictionary format, you can get the data and the labels separately by extracting the content from the dictionary:\n```\ndata = qmnist['data']\nlabels = qmnist['labels']\n```\n\n\n### Source\n\nThe original QMNIST dataset was uploaded by Chhavi Yadav and L\u00e9on Bottou. Citation:\n&gt; Yadav, C. and Bottou, L., \u201cCold Case: The Lost MNIST Digits\u201d, <i>arXiv e-prints</i>, 2019.\n\nLink to the original paper: [https://arxiv.org/pdf/1905.10498.pdf](https://arxiv.org/pdf/1905.10498.pdf)\nLink to the GitHub repository: [https://github.com/facebookresearch/qmnist](https://github.com/facebookresearch/qmnist)\n\nMy contribution was to collect all the images and labels into the same file and convert it into a pickle file so it is easier to load. Please consider mentioning the author if you use this dataset instead of the original version.", "VersionNotes": "Data Update 2021/07/24", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1488071, "CreatorUserId": 6402661, "OwnerUserId": 6402661.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2458532.0, "CurrentDatasourceVersionId": 2500942.0, "ForumId": 1507762, "Type": 2, "CreationDate": "07/24/2021 15:04:30", "LastActivityDate": "07/24/2021", "TotalViews": 9691, "TotalDownloads": 859, "TotalVotes": 29, "TotalKernels": 20}]
[{"Id": 6402661, "UserName": "fedesoriano", "DisplayName": "fedesoriano", "RegisterDate": "12/18/2020", "PerformanceTier": 4}]
false
1
878
0
1,436
878
69235789
# # Tabular Playground JULY 2021 # The goal of competitions is to provide a fun, and approachable for anyone, tabular dataset. # ## Evaluation Scheme # The RMSLE for a single column calculated as: # $$\sqrt{\frac{1}{n} \sum_{i=1}^n (\log(p_i + 1) - \log(a_i+1))^2 }$$ # where: # $n$ is the total number of observations # $p_i$ is your prediction # $a_i$ is the actual value # $log(x)$ is the natural logarithm of # # Importing Dependencies # - pandas : for csv reading and data analysis # - numpy : for array manipulation # - matplotlib.pyplot : for plotting graphs # - os : os level commands # - seaborn : for better looking plots import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import seaborn as sns # path to dataset folder dataset_path = "../input/tabular-playground-series-jul-2021" os.listdir(dataset_path) # # Basic Data Analysis train_csv = pd.read_csv(os.path.join(dataset_path, "train.csv")) train_csv.head() test_csv = pd.read_csv(os.path.join(dataset_path, "test.csv")) test_csv.head() submission_csv = pd.read_csv(os.path.join(dataset_path, "sample_submission.csv")) submission_csv.head() # train data shape train_csv.shape # check if any value in dataframe is null train_csv.isnull().sum() train_csv.describe() train_csv.columns print("Range of datetime in training data") print(train_csv["date_time"].min()) print("to") print(train_csv["date_time"].max()) print("Range of datetime in test data") print(test_csv["date_time"].min()) print("to") print(test_csv["date_time"].max()) # # Data Visualization # - plot time series targets values # - plot time series sensor data # ### Plotting targets train_csv.plot(x="date_time", y="target_carbon_monoxide", rot=50) train_csv.plot(x="date_time", y="target_benzene", rot=50) train_csv.plot(x="date_time", y="target_nitrogen_oxides", rot=50) # ### Plotting sensor data train_csv.plot(x="date_time", y="sensor_1", rot=50) train_csv.plot(x="date_time", y="sensor_2", rot=50) train_csv.plot(x="date_time", y="sensor_3", rot=50) train_csv.plot(x="date_time", y="sensor_4", rot=50) train_csv.plot(x="date_time", y="sensor_5", rot=50) # # Feature Engineering # - changing datatypes of date column to pd datetime # - convert datetime to features # - extracting phase of day with respect to time # - extracting season of the year using month # - ratio of relative humidity and temperature # changing dtype of columns def change_dtypes(df): df["date_time"] = pd.to_datetime(df["date_time"]) change_dtypes(train_csv) # extracting features using datetime def datetime2features(df): time_col = "date_time" df["year"] = df[time_col].dt.year df["month"] = df[time_col].dt.month df["day"] = df[time_col].dt.day df["hour"] = df[time_col].dt.hour df["dayofweek"] = df[time_col].dt.dayofweek df["year"] = df[time_col].dt.year df["weekend"] = df[time_col].dt.dayofweek.apply(lambda x: 1 if (x > 4) else 0) """ which phase of day the time denotes [morning, afternoon, evening, night] """ def time_phase(df): def which_phase(hour): if hour >= 0 and hour <= 5: return 1 elif hour >= 6 and hour <= 11: return 2 elif hour >= 12 and hour <= 17: return 3 elif hour >= 18 and hour <= 23: return 4 return NaN time_col = "date_time" df["phase"] = df[time_col].dt.hour.apply(lambda x: which_phase(x)) datetime2features(train_csv) time_phase(train_csv) train_csv.head() train_csv.describe() """ which season of year the time denotes [summer, rainy, winter] """ def season(df): def which_season(month): if month >= 3 and month <= 6: return 1 elif month >= 7 and month <= 9: return 2 elif month >= 
10 and month <= 12: return 3 elif month < 3: return 3 return NaN time_col = "date_time" df["season"] = df[time_col].dt.month.apply(lambda x: which_season(x)) season(train_csv) train_csv.head() train_csv.describe() """ ratio between relative humidity and temperature """ def ratio_rh_temp(df): df["r_rh_temp"] = df["relative_humidity"] / (df["deg_C"] + 1e-9) ratio_rh_temp(train_csv) train_csv.head() train_csv.describe() # ### Ploting correlations between columns plt.figure(figsize=(10, 8)) sns.heatmap(train_csv.corr()) model_save_folder = "models" csv_folder = "csv" os.makedirs(model_save_folder, exist_ok=True) os.makedirs(csv_folder, exist_ok=True) train_csv.to_csv(os.path.join(csv_folder, "train_edit.csv"), index=False) test_csv.describe() change_dtypes(test_csv) datetime2features(test_csv) time_phase(test_csv) season(test_csv) ratio_rh_temp(test_csv) test_csv.describe() test_csv.to_csv(os.path.join(csv_folder, "test_edit.csv"), index=False) # # Model Training from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.linear_model import ( LinearRegression, Ridge, BayesianRidge, LogisticRegression, ) from sklearn.svm import SVR from mlxtend.regressor import StackingCVRegressor import lightgbm as lgb import xgboost as xgb import catboost as cbt import sklearn.metrics as metrics import sklearn.model_selection as ms import pickle # Cross validation utility class CrossValidation: def __init__(self, df, shuffle, random_state=None): self.df = df self.random_state = random_state self.shuffle = shuffle if shuffle is True: self.df = df.sample(frac=1, random_state=self.random_state).reset_index( drop=True ) if not shuffle: self.random_state = None def hold_out_split(self, percent, stratify=None): if stratify is not None: y = self.df[stratify] train, val = ms.train_test_split( self.df, test_size=percent / 100, stratify=y, random_state=self.random_state, ) return train, val size = len(self.df) - int(len(self.df) * (percent / 100)) train = self.df.iloc[:size, :] val = self.df.iloc[size:, :] return train, val def kfold_split(self, splits, stratify=None): if stratify is not None: kf = ms.StratifiedKFold(n_splits=splits, random_state=self.random_state) y = self.df[stratify] for train, val in kf.split(X=self.df, y=y): t = self.df.iloc[train, :] v = self.df.iloc[val, :] yield t, v else: kf = ms.KFold( n_splits=splits, shuffle=self.shuffle, random_state=self.random_state ) for train, val in kf.split(X=self.df): t = self.df.iloc[train, :] v = self.df.iloc[val, :] yield t, v # calculate rmsle of predicted data def mse(y_true, y_pred): return metrics.mean_squared_error(y_true, y_pred) folds = 5 seed = 48 features_exclude = ["date_time"] targets = ["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"] features = [col for col in train_csv.columns if col not in features_exclude + targets] print(features) # ### Splitting data into folds for cross validation cv = CrossValidation(train_csv, shuffle=True, random_state=seed) fold_models = {tar: [] for tar in targets} print(fold_models) # # Training m_rfg = RandomForestRegressor(n_estimators=100) m_gb = GradientBoostingRegressor(n_estimators=100) m_lgb = lgb.LGBMRegressor(seed=seed) m_ctb = cbt.CatBoostRegressor(random_seed=seed, verbose=False) m_xgb = xgb.XGBRegressor(random_state=seed) learners = (m_rfg, m_gb, m_lgb, m_ctb, m_xgb) meta_model = BayesianRidge(normalize=True) def train_step(X, Y, evalX, evalY, learners, meta_model, verbose=True): reg = StackingCVRegressor( regressors=learners, meta_regressor=meta_model, 
n_jobs=-1, verbose=int(verbose) ) trainX = X.values trainY = Y.values model = reg.fit(trainX, trainY) predY_train = model.predict(trainX) train_rmsle = mse(trainY, predY_train) train_r2 = metrics.r2_score(trainY, predY_train) if verbose: print("Training mse: ", train_rmsle) print("Training r2: ", train_r2) valX = evalX.values valY = evalY.values predY_val = model.predict(valX) val_rmsle = mse(valY, predY_val) val_r2 = metrics.r2_score(valY, predY_val) if verbose: print("Validation mse: ", val_rmsle) print("Validation r2: ", val_r2) return { "model": model, "train_scores": {"r2": train_r2, "mse": train_rmsle}, "val_scores": {"r2": val_r2, "mse": val_rmsle}, } def train_folds( cv, feature_cols, target_col, num_folds, learners, meta_model, verbose=True ): fold_train_rmsle = [] fold_train_r2 = [] fold_val_rmsle = [] fold_val_r2 = [] fold_models = [] for fold, (train_, val_) in enumerate(cv.kfold_split(splits=num_folds)): result = train_step( X=train_[feature_cols], Y=train_[target_col], evalX=val_[feature_cols], evalY=val_[target_col], learners=learners, meta_model=meta_model, verbose=verbose, ) fold_train_rmsle.append(result["train_scores"]["mse"]) fold_train_r2.append(result["train_scores"]["r2"]) fold_val_rmsle.append(result["val_scores"]["mse"]) fold_val_r2.append(result["val_scores"]["r2"]) fold_models.append(result["model"]) return { "models": fold_models, "train_scores": { "r2": np.mean(fold_train_r2), "mse": np.mean(fold_train_rmsle), }, "val_scores": {"r2": np.mean(fold_val_r2), "mse": np.mean(fold_val_rmsle)}, } # ### Carbon Monoxide target = "target_carbon_monoxide" results = train_folds(cv, features, target, folds, learners, meta_model) fold_models[target] = results["models"] print("=" * 50) print("Training MSE: ", results["train_scores"]["mse"]) print("Training R2: ", results["train_scores"]["r2"]) print("Validation MSE: ", results["val_scores"]["mse"]) print("Validation R2: ", results["val_scores"]["r2"]) # ### Benzene target = "target_benzene" results = train_folds(cv, features, target, folds, learners, meta_model) fold_models[target] = results["models"] print("=" * 50) print("Training MSE: ", results["train_scores"]["mse"]) print("Training R2: ", results["train_scores"]["r2"]) print("Validation MSE: ", results["val_scores"]["mse"]) print("Validation R2: ", results["val_scores"]["r2"]) # ### Nitrogen Oxides target = "target_nitrogen_oxides" results = train_folds(cv, features, target, folds, learners, meta_model) fold_models[target] = results["models"] print("=" * 50) print("Training MSE: ", results["train_scores"]["mse"]) print("Training R2: ", results["train_scores"]["r2"]) print("Validation MSE: ", results["val_scores"]["mse"]) print("Validation R2: ", results["val_scores"]["r2"]) # ### Prediction blending from folds def get_weights(predictions, targets, apply_softmax=True): def softmax(x): f_x = np.exp(x) / np.sum(np.exp(x)) return f_x lnr = LinearRegression() lnr_model = lnr.fit(predictions, targets) if apply_softmax: return softmax(lnr_model.coef_) return lnr_model.coef_ def weighted_sum(predictions, weights): return np.dot(predictions, weights) trainX = train_csv[features].values trainY = train_csv[targets] predictions = [] preds = [] for model in fold_models[targets[0]]: preds.append(model.predict(trainX)) preds = np.array(preds) weights_0 = get_weights(preds.transpose(), trainY[targets[0]].values) print("Fold Predictions Weightings") print(weights_0) preds = weighted_sum(preds.transpose(), weights_0) predictions.append(preds) preds = [] for model in 
fold_models[targets[1]]: preds.append(model.predict(trainX)) preds = np.array(preds) weights_1 = get_weights(preds.transpose(), trainY[targets[1]].values) print("Fold Predictions Weightings") print(weights_1) preds = weighted_sum(preds.transpose(), weights_1) predictions.append(preds) preds = [] for model in fold_models[targets[2]]: preds.append(model.predict(trainX)) preds = np.array(preds) weights_2 = get_weights(preds.transpose(), trainY[targets[2]].values) print("Fold Predictions Weightings") print(weights_2) preds = weighted_sum(preds.transpose(), weights_2) predictions.append(preds) predictions = np.array(predictions).transpose() print(predictions.shape) print(trainY.shape) predictions = np.where(predictions < 0, 0, predictions) print("R2 score: ", metrics.r2_score(trainY, predictions)) print("RMSLE score: ", np.sqrt(metrics.mean_squared_log_error(trainY, predictions))) # # Inference on test dataset testX = test_csv[features].values predictions = [] preds = [] for model in fold_models[targets[0]]: preds.append(model.predict(testX)) preds = np.array(preds) preds = weighted_sum(preds.transpose(), weights_0) predictions.append(preds) preds = [] for model in fold_models[targets[1]]: preds.append(model.predict(testX)) preds = np.array(preds) preds = weighted_sum(preds.transpose(), weights_1) predictions.append(preds) preds = [] for model in fold_models[targets[2]]: preds.append(model.predict(testX)) preds = np.array(preds) preds = weighted_sum(preds.transpose(), weights_2) predictions.append(preds) predictions = np.array(predictions).transpose() predictions.shape predictions = np.where(predictions < 0, 0, predictions) submission_csv[targets] = predictions submission_csv.to_csv("submission.csv", index=False) submission_csv.head() for key, models in fold_models.items(): for fold, model in enumerate(models): with open( os.path.join(model_save_folder, f"{key}_fold_{fold+1}.pkl"), "wb" ) as pckl: pickle.dump(model, pckl)
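The markdown cell at the top of this notebook defines the RMSLE metric, and the final scoring line computes it via `np.sqrt(metrics.mean_squared_log_error(...))`. A small sketch with made-up numbers, showing that the hand-rolled formula and the scikit-learn helper agree:

```
import numpy as np
from sklearn.metrics import mean_squared_log_error

# Illustrative values only
actual = np.array([1.0, 1.0, 300.0])
pred = np.array([1.2, 0.8, 250.0])

rmsle_manual = np.sqrt(np.mean((np.log(pred + 1) - np.log(actual + 1)) ** 2))
rmsle_sklearn = np.sqrt(mean_squared_log_error(actual, pred))

assert np.isclose(rmsle_manual, rmsle_sklearn)
print(rmsle_manual)
```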
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/235/69235789.ipynb
null
null
[{"Id": 69235789, "ScriptId": 18467475, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3741633, "CreationDate": "07/28/2021 12:52:25", "VersionNumber": 6.0, "Title": "tabular playground series jul 2021", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 506.0, "LinesInsertedFromPrevious": 14.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 492.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 7}]
null
null
null
null
false
0
4,439
7
4,439
4,439
69235506
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

import numpy as np
import tensorflow as tf
from tensorflow import keras
import pandas as pd

X_train = pd.read_csv("../input/digit-recognizer/train.csv")
X_test = pd.read_csv("../input/digit-recognizer/test.csv")

# Separate the label column, scale pixels to [0, 1] and reshape to 28x28x1 images
y_train = X_train.label
X_train = X_train.drop(["label"], axis=1)

X_train /= 255
X_test /= 255

X_train = X_train.to_numpy()
X_original_test = X_test.to_numpy()

X_train = np.reshape(X_train, (-1, 28, 28, 1))
X_original_test = np.reshape(X_original_test, (-1, 28, 28, 1))

from tensorflow.keras import layers

inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(inputs)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(3, 3))(x)
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)

# Apply global average pooling to get flat feature vectors
x = layers.GlobalAveragePooling2D()(x)

# Add a dense classifier on top
num_classes = 10
outputs = layers.Dense(num_classes, activation="softmax")(x)

model = keras.Model(inputs=inputs, outputs=outputs)
model.summary()

model.compile(
    optimizer="adam",
    # The final layer already applies a softmax, so the loss receives probabilities,
    # not logits; from_logits must therefore be False.
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=["accuracy"],
)

from sklearn.model_selection import train_test_split

# Carve validation and held-out test sets out of the labelled training data
X_train, X_valid, y_train, y_valid = train_test_split(
    X_train, y_train, test_size=0.2, random_state=0
)
X_valid, X_test, y_valid, y_test = train_test_split(
    X_valid, y_valid, test_size=0.2, random_state=1
)

model.fit(
    X_train, y_train, batch_size=32, epochs=10, validation_data=(X_valid, y_valid)
)

loss, acc = model.evaluate(X_test, y_test)  # returns loss and metrics
print("loss: %.2f" % loss)
print("acc: %.2f" % acc)

predictions = model.predict(X_original_test)
predictions = np.argmax(predictions, axis=1)

# Image ids in the submission file are 1-based
indexes = np.linspace(1, len(X_original_test), len(X_original_test), dtype="int")
len(predictions)

output = pd.DataFrame({"ImageId": indexes, "Label": predictions})
output.to_csv("submission.csv", index=False)
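Because the network's last layer applies a softmax, the compiled loss above uses `from_logits=False`. An equivalent alternative (a sketch only, not the author's code) is to drop the final activation and let the loss apply the softmax internally, which is slightly more numerically stable:

```
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, (3, 3), activation="relu")(inputs)
x = layers.GlobalAveragePooling2D()(x)
outputs = layers.Dense(10)(x)  # no softmax: the model now emits raw logits

model = keras.Model(inputs, outputs)
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
```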
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/235/69235506.ipynb
null
null
[{"Id": 69235506, "ScriptId": 18899411, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6495842, "CreationDate": "07/28/2021 12:48:59", "VersionNumber": 1.0, "Title": "digit identification with keras", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 86.0, "LinesInsertedFromPrevious": 86.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
null
null
null
null
false
0
934
3
934
934
69235792
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns from sklearn.impute import SimpleImputer from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OrdinalEncoder from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from xgboost import XGBRegressor from sklearn.ensemble import RandomForestClassifier from sklearn import linear_model from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import confusion_matrix, accuracy_score, classification_report from sklearn.model_selection import GridSearchCV from sklearn.model_selection import cross_val_score # Input data files are available in the read-only "../input/" directory # For exa```````````````````````````mple, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # **Explore the Data set** df_train = pd.read_csv("/kaggle/input/titanic/train.csv") df_test = pd.read_csv("/kaggle/input/titanic/test.csv") df_train.shape, df_test.shape all_data = [df_train, df_test] df_train.head() df_train.describe() df_test.describe() # # **Exploratory data analysis (Visualisation )** df_train.groupby("Survived").hist(figsize=(9, 9)) # EDA Col 'Sex' df_train.groupby("Sex")[["Sex", "Survived"]].mean() print(pd.crosstab(df_train.Sex, df_train.Survived) / df_train.shape[0] * 100) sns.countplot(data=df_train, hue="Survived", x="Sex") plt.title("Survived Gender Count") plt.show() # EDA Col 'Sex' accoding to 'Age' grid = sns.FacetGrid(df_train, col="Survived", row="Sex") grid.map(plt.hist, "Age", bins=20) grid.add_legend() # EDA Col 'Pclass' df_train.groupby("Pclass")[["Pclass", "Survived"]].mean() print(pd.crosstab(df_train.Pclass, df_train.Survived)) sns.countplot(data=df_train, x="Pclass", hue="Survived") plt.title("Survived according to the Class") plt.show() grid = sns.FacetGrid(df_train, col="Survived", row="Pclass") grid.map(plt.hist, "Age", bins=20) grid.add_legend() # EDA Col 'SibSp' df_train.groupby("SibSp")[["SibSp", "Survived"]].mean() print(pd.crosstab(df_train.SibSp, df_train.Survived)) sns.countplot(data=df_train, x="SibSp", hue="Survived") plt.title("Sibling according to the Class") plt.show() # EDA Col 'Parch' df_train.groupby("Parch")[["Parch", "Survived"]].mean() print(pd.crosstab(df_train.Parch, df_train.Survived)) sns.countplot(data=df_train, x="Parch", hue="Survived") plt.title("Parch according to the Class") plt.show() # EDA Col 'Age' df_train.groupby("Age")[["Age", "Survived"]].mean() plt.figure(figsize=(15, 6)) grid = sns.FacetGrid(df_train, col="Survived") grid.map(plt.hist, "Age", bins=20) plt.show() # EDA Col 'Embarked' print(pd.crosstab(df_train.Embarked, df_train.Survived)) sns.countplot(data=df_train, x="Embarked", hue="Survived") plt.title("Embarked according to the Class") plt.show() # # **Data Cleaning and imputation** # df_train.isnull().sum() df_train.isnull().sum()[df_train.isnull().any()] df_test.isnull().sum()[df_test.isnull().any()] # Almost half of 
the data in "Cabin" Col is NAN, so I will drop it df_train.drop(labels="Cabin", axis=1, inplace=True) df_test.drop(labels="Cabin", axis=1, inplace=True) df_train df_test df_train["Ticket"].unique() # Almost half of the data in "Ticket" Col is Unique, so I drop it df_train.drop(labels="Ticket", axis=1, inplace=True) df_test.drop(labels="Ticket", axis=1, inplace=True) df_train df_test["Age"] = df_test["Age"].fillna(df_test["Age"].mean()) df_test.info() df_train["Age"] = df_train["Age"].fillna(df_train["Age"].mean()) df_train["Embarked"] = df_train["Embarked"].fillna("S") df_test.isnull().sum()[df_test.isnull().any()] df_test["Fare"] = df_test["Fare"].fillna(df_test["Fare"].mean()) df_train.isnull().sum()[df_train.isnull().any()] df_train.shape, df_test.shape # # **Change Var of Col 'Name'** all_data = [df_train, df_test] for dataset in all_data: dataset["title"] = dataset.Name.str.extract("([A-Za-z]+)\.", expand=False) pd.crosstab(df_train.title, df_train.Sex) for dataset in all_data: dataset["title"] = dataset["title"].replace( [ "Lady", "Countess", "Capt", "Col", "Don", "Dr", "Major", "Rev", "Sir", "Jonkheer", "Dona", ], "Rare", ) dataset["title"] = dataset["title"].replace("Mlle", "Miss") dataset["title"] = dataset["title"].replace("Ms", "Miss") dataset["title"] = dataset["title"].replace("Mme", "Mrs") print(df_train[["title", "Survived"]].groupby(["title"], as_index=False).mean()) df_train.title.value_counts() print(pd.crosstab(df_train.title, df_train.Survived)) df_train.head # Drop col 'Name' df_train.drop(labels="Name", axis=1, inplace=True) df_test.drop(labels="Name", axis=1, inplace=True) all_data = [df_train, df_test] df_train["AgeState"] = pd.cut( df_train.Age, bins=[0, 2, 5, 18, 60, 100], labels=["Infant", "Toddler", "Child", "Adult", "Senior_Citizen"], include_lowest=True, ) df_test["AgeState"] = pd.cut( df_test.Age, bins=[0, 2, 5, 18, 60, 100], labels=["Infant", "Toddler", "Child", "Adult", "Senior_Citizen"], include_lowest=True, ) df_train.AgeState.value_counts() print(pd.crosstab(df_train.AgeState, df_train.Survived)) # Drop col 'Age' df_train.drop(labels="Age", axis=1, inplace=True) df_test.drop(labels="Age", axis=1, inplace=True) all_data = [df_train, df_test] df_train.info() df_test.info() df_test.shape, df_train.shape df_train.drop(labels="PassengerId", axis=1, inplace=True) # # Categorical Variables for dataset in all_data: dataset["Sex"] = dataset["Sex"].map({"female": 0, "male": 1}).astype("int64") df_train.Sex.value_counts() for dataset in all_data: dataset["Embarked"] = ( dataset["Embarked"].map({"S": 0, "C": 1, "Q": 2}).astype("int64") ) df_train.Embarked.value_counts() for dataset in all_data: dataset["title"] = ( dataset["title"] .map({"Mr": 0, "Miss": 1, "Mrs": 2, "Master": 3, "Rare": 4}) .astype("int64") ) df_train.title.value_counts() for dataset in all_data: dataset["AgeState"] = ( dataset["AgeState"] .map({"Adult": 0, "Child": 1, "Infant": 2, "Senior_Citizen": 3, "Toddler": 4}) .astype("int64") ) df_train.AgeState.value_counts() # # **Fitting Model** X = df_train.drop("Survived", axis=1) y = df_train["Survived"] X_test_data = df_test.drop("PassengerId", axis=1).copy() X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=0.75, random_state=10 ) X_train.shape, X_test.shape, y_train.shape, y_test.shape # # Fitting Model (XGBRegressor) model_xg = XGBRegressor(n_estimators=500, learning_rate=0.04, random_state=0) model_xg.fit( X_train, y_train, early_stopping_rounds=5, eval_set=[(X_test, y_test)], verbose=False, ) y_pred = model_xg.predict(X_test) 
predictions = [round(value) for value in y_pred] training_data_accuracy_xg = accuracy_score(y_test, predictions) print("The accuracy of XGBoost Model is", (training_data_accuracy_xg * 100), "%") # # Fitting Model (Random Forest Classifer) param_grid = { "n_estimators": [200, 150, 100, 50], "criterion": ["gini", "entropy"], "max_depth": [2, 4, 6], } Model_rf = RandomForestClassifier() grid = GridSearchCV(estimator=Model_rf, param_grid=param_grid, n_jobs=-1) grid.fit(X_train, y_train) Model_rf = grid.best_estimator_ y_pred_train = Model_rf.predict(X_test) training_data_accuracy_rf = accuracy_score(y_test, y_pred_train) print("The accuracy of RandomForestC Model is", (training_data_accuracy_rf * 100), "%") rf = RandomForestClassifier(criterion="entropy", max_depth=6) rf.fit(X_train, y_train) y_pred = rf.predict(X_test) print(accuracy_score(y_pred, y_test)) y_train_pre = rf.predict(X_train) print(accuracy_score(y_train_pre, y_train)) scores = cross_val_score(rf, X_train, y_train, cv=10, scoring="accuracy") print("Scores:", scores) print("Mean:", scores.mean()) print("Standard Deviation:", scores.std()) print("Accuracy_score", accuracy_score(y_train_pre, y_train)) print("Mean:", scores.mean()) print("Variation:", scores.var()) y_preds_test = rf.predict(X_test_data) importances = pd.DataFrame( {"feature": X_train.columns, "importance": np.round(rf.feature_importances_, 3)} ) importances1 = importances.sort_values("importance", ascending=False).set_index( "feature" ) importances1.plot(kind="bar", figsize=(15, 6)) Titanic_submission = pd.DataFrame( {"Passengerid": df_test["PassengerId"], "Survived": y_preds_test} ) Titanic_submission.head() Titanic_submission.shape Titanic_submission.to_csv("Titanic_submission.csv", index=False)
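The `str.extract("([A-Za-z]+)\.")` call above pulls the honorific (Mr, Miss, Mrs, ...) out of each passenger name. A tiny sketch with illustrative names, not rows from the actual file:

```
import pandas as pd

names = pd.Series(
    [
        "Braund, Mr. Owen Harris",
        "Heikkinen, Miss. Laina",
        "Futrelle, Mrs. Jacques Heath (Lily May Peel)",
    ]
)
titles = names.str.extract(r"([A-Za-z]+)\.", expand=False)
print(titles.tolist())  # ['Mr', 'Miss', 'Mrs']
```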
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/235/69235792.ipynb
null
null
[{"Id": 69235792, "ScriptId": 18894419, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4151319, "CreationDate": "07/28/2021 12:52:28", "VersionNumber": 12.0, "Title": "Titanic Survival, ML", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 344.0, "LinesInsertedFromPrevious": 7.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 337.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
false
0
3,279
2
3,279
3,279
69235131
<jupyter_start><jupyter_text>EfficientNet PyTorch Recently new ConvNets architectures have been proposed in ["EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks"](https://arxiv.org/pdf/1905.11946.pdf) paper. According to the paper, model's compound scaling starting from a 'good' baseline provides an network that achieves state-of-the-art on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Pretrained weights from [lukemelas/EfficientNet-PyTorch](https://github.com/lukemelas/EfficientNet-PyTorch/) repository Kaggle dataset identifier: efficientnet-pytorch <jupyter_script># # Import things package_path = ( "../input/efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master/" ) import sys sys.path.append(package_path) import os import glob import time import random import numpy as np import pandas as pd import pydicom from pydicom.pixel_data_handlers.util import apply_voi_lut import cv2 import torch from torch import nn from torch.utils import data as torch_data from torch.nn import functional as F import efficientnet_pytorch from torch.utils.data import Dataset, DataLoader device = torch.device("cuda" if torch.cuda.is_available() else "cpu") seed = 123 def seed_everything(seed): random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = True seed_everything(seed) class X3D: XS = 0 S = 1 M = 2 L = 3 x3d_config = { "input_clip_length": [4, 13, 16, 16], "depth_factor": [2.2, 2.2, 2.2, 5.0], "width_factor": [1, 1, 1, 2.9], } class CFG: img_size = 256 n_frames = 10 center_crop = 0.05 cnn_features = 256 lstm_hidden = 32 n_fold = 5 n_epochs = 10 # # Model class CNN(nn.Module): def __init__(self): super().__init__() self.map = nn.Conv2d(in_channels=4, out_channels=3, kernel_size=1) self.net = efficientnet_pytorch.EfficientNet.from_name("efficientnet-b0") # checkpoint = torch.load("../input/efficientnet-pytorch/efficientnet-b0-08094119.pth") # self.net.load_state_dict(checkpoint) n_features = self.net._fc.in_features self.net._fc = nn.Linear( in_features=n_features, out_features=CFG.cnn_features, bias=True ) def forward(self, x): x = F.relu(self.map(x)) out = self.net(x) return out class Model(nn.Module): def __init__(self): super(Model, self).__init__() self.cnn = CNN() self.rnn = nn.LSTM(CFG.cnn_features, CFG.lstm_hidden, 2, batch_first=True) self.fc = nn.Linear(CFG.lstm_hidden, 1, bias=True) def forward(self, x): # x shape: BxTxCxHxW batch_size, timesteps, C, H, W = x.size() c_in = x.view(batch_size * timesteps, C, H, W) c_out = self.cnn(c_in) r_in = c_out.view(batch_size, timesteps, -1) output, (hn, cn) = self.rnn(r_in) out = self.fc(hn[-1]) return out # # Data Processing def load_dicom(path): dicom = pydicom.read_file(path) data = dicom.pixel_array data = data - np.min(data) if np.max(data) != 0: data = data / np.max(data) height, width = data.shape margin_h = int(height * CFG.center_crop) margin_w = int(width * CFG.center_crop) data = data[margin_h:-margin_h, margin_w:-margin_w] data = np.float32(cv2.resize(data, (CFG.img_size, CFG.img_size))) return torch.tensor(data) def load_dicom_line(path): t_paths = sorted( glob.glob(os.path.join(path, "*")), key=lambda x: int(x[:-4].split("-")[-1]), ) images = [] for filename in t_paths: data = load_dicom(filename) if data.max() == 0: continue images.append(data) return images def load_image(path): image = cv2.imread(path, 0) if 
image is None: return np.zeros((CFG.img_size, CFG.img_size)) image = cv2.resize(image, (CFG.img_size, CFG.img_size)) / 255 return torch.tensor(image) def get_valid_frames(t_paths): res = [] for path in t_paths: img = load_dicom(path) if img.view(-1).mean(0) != 0: res.append(path) return res def uniform_temporal_subsample(x, num_samples): """ Moddified from https://github.com/facebookresearch/pytorchvideo/blob/d7874f788bc00a7badfb4310a912f6e531ffd6d3/pytorchvideo/transforms/functional.py#L19 Args: x: input list num_samples: The number of equispaced samples to be selected Returns: Output list """ t = len(x) indices = torch.linspace(0, t - 1, num_samples) indices = torch.clamp(indices, 0, t - 1).long() return [x[i] for i in indices] class TestDataRetriever(Dataset): def __init__(self, paths, transform=None): self.paths = paths self.transform = transform def __len__(self): return len(self.paths) def read_video(self, vid_paths): video = [load_dicom(path) for path in vid_paths] if len(video) == 0: video = torch.zeros(CFG.n_frames, CFG.img_size, CFG.img_size) else: video = torch.stack(video) # T * C * H * W # video = torch.transpose(video, 0, 1) # C * T * H * W return video def __getitem__(self, index): _id = self.paths[index] patient_path = f"../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/{str(_id).zfill(5)}/" channels = [] for t in ["FLAIR", "T1w", "T1wCE", "T2w"]: t_paths = sorted( glob.glob(os.path.join(patient_path, t, "*")), key=lambda x: int(x[:-4].split("-")[-1]), ) num_samples = CFG.n_frames # t_paths = get_valid_frames(t_paths) if len(t_paths) < num_samples: in_frames_path = t_paths else: in_frames_path = uniform_temporal_subsample(t_paths, num_samples) channel = self.read_video(in_frames_path) if channel.shape[0] == 0: print("1 channel empty") channel = torch.zeros(num_samples, CFG.img_size, CFG.img_size) channels.append(channel) channels = torch.stack(channels).transpose(0, 1) return {"X": channels.float(), "id": _id} df = pd.read_csv( "../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv" ) df.head(10) # # Inference models = [] for i in range(1, CFG.n_fold + 1): model = Model() model.to(device) checkpoint = torch.load(f"../input/rnsa-21-cnn-lstm-train/best-model-{i}.pth") model.load_state_dict(checkpoint["model_state_dict"]) model.eval() models.append(model) submission = pd.read_csv( "../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv" ) test_data_retriever = TestDataRetriever(submission["BraTS21ID"].values) test_loader = torch_data.DataLoader( test_data_retriever, batch_size=4, shuffle=False, num_workers=8, ) y_pred = [] ids = [] for e, batch in enumerate(test_loader): print(f"{e}/{len(test_loader)}", end="\r") with torch.no_grad(): tmp_pred = np.zeros((batch["X"].shape[0],)) for model in models: tmp_res = ( torch.sigmoid(model(batch["X"].to(device))).cpu().numpy().squeeze() ) tmp_pred += tmp_res tmp_pred = tmp_pred / len(models) y_pred.extend(tmp_pred) ids.extend(batch["id"].numpy().tolist()) submission = pd.DataFrame({"BraTS21ID": ids, "MGMT_value": y_pred}) submission.to_csv("submission.csv", index=False) submission
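A note on the sampling step above: `uniform_temporal_subsample` keeps a fixed number of equispaced slices per MRI series, which is what lets every series map to the same tensor shape before stacking the four modalities. A minimal standalone sketch of the same indexing logic on a toy list of file names (the helper mirrors the function defined in the notebook; the toy paths are made up for illustration):

import torch

def uniform_temporal_subsample(x, num_samples):
    # Select `num_samples` equispaced elements from `x`, always keeping both endpoints.
    t = len(x)
    indices = torch.linspace(0, t - 1, num_samples)
    indices = torch.clamp(indices, 0, t - 1).long()
    return [x[i] for i in indices]

# 25 toy slice paths reduced to the notebook's CFG.n_frames = 10 frames.
slices = [f"Image-{i}.dcm" for i in range(1, 26)]
print(uniform_temporal_subsample(slices, 10))
# keeps the first and last slice plus eight roughly equispaced slices in between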
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/235/69235131.ipynb
efficientnet-pytorch
hmendonca
[{"Id": 69235131, "ScriptId": 18816145, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2310224, "CreationDate": "07/28/2021 12:43:32", "VersionNumber": 12.0, "Title": "[RNSA-21] CNN-LSTM Inference", "EvaluationDate": "07/28/2021", "IsChange": false, "TotalLines": 252.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 252.0, "LinesInsertedFromFork": 61.0, "LinesDeletedFromFork": 36.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 191.0, "TotalVotes": 0}]
[{"Id": 92156580, "KernelVersionId": 69235131, "SourceDatasetVersionId": 848739}, {"Id": 92156581, "KernelVersionId": 69235131, "SourceDatasetVersionId": 2425289}]
[{"Id": 848739, "DatasetId": 251095, "DatasourceVersionId": 875102, "CreatorUserId": 998023, "LicenseName": "Unknown", "CreationDate": "12/18/2019 00:26:13", "VersionNumber": 2.0, "Title": "EfficientNet PyTorch", "Slug": "efficientnet-pytorch", "Subtitle": "Pre-trained EfficientNet models (B0-B7) for PyTorch", "Description": "Recently new ConvNets architectures have been proposed in [\"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks\"](https://arxiv.org/pdf/1905.11946.pdf) paper. According to the paper, model's compound scaling starting from a 'good' baseline provides an network that achieves state-of-the-art on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet.\n\nPretrained weights from [lukemelas/EfficientNet-PyTorch](https://github.com/lukemelas/EfficientNet-PyTorch/) repository", "VersionNotes": "Migrated data", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 251095, "CreatorUserId": 451025, "OwnerUserId": 451025.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 848739.0, "CurrentDatasourceVersionId": 875102.0, "ForumId": 262329, "Type": 2, "CreationDate": "06/30/2019 16:55:54", "LastActivityDate": "06/30/2019", "TotalViews": 42917, "TotalDownloads": 6141, "TotalVotes": 241, "TotalKernels": 279}]
[{"Id": 451025, "UserName": "hmendonca", "DisplayName": "Henrique Mendon\u00e7a", "RegisterDate": "10/26/2015", "PerformanceTier": 3}]
# # Import things package_path = ( "../input/efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master/" ) import sys sys.path.append(package_path) import os import glob import time import random import numpy as np import pandas as pd import pydicom from pydicom.pixel_data_handlers.util import apply_voi_lut import cv2 import torch from torch import nn from torch.utils import data as torch_data from torch.nn import functional as F import efficientnet_pytorch from torch.utils.data import Dataset, DataLoader device = torch.device("cuda" if torch.cuda.is_available() else "cpu") seed = 123 def seed_everything(seed): random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = True seed_everything(seed) class X3D: XS = 0 S = 1 M = 2 L = 3 x3d_config = { "input_clip_length": [4, 13, 16, 16], "depth_factor": [2.2, 2.2, 2.2, 5.0], "width_factor": [1, 1, 1, 2.9], } class CFG: img_size = 256 n_frames = 10 center_crop = 0.05 cnn_features = 256 lstm_hidden = 32 n_fold = 5 n_epochs = 10 # # Model class CNN(nn.Module): def __init__(self): super().__init__() self.map = nn.Conv2d(in_channels=4, out_channels=3, kernel_size=1) self.net = efficientnet_pytorch.EfficientNet.from_name("efficientnet-b0") # checkpoint = torch.load("../input/efficientnet-pytorch/efficientnet-b0-08094119.pth") # self.net.load_state_dict(checkpoint) n_features = self.net._fc.in_features self.net._fc = nn.Linear( in_features=n_features, out_features=CFG.cnn_features, bias=True ) def forward(self, x): x = F.relu(self.map(x)) out = self.net(x) return out class Model(nn.Module): def __init__(self): super(Model, self).__init__() self.cnn = CNN() self.rnn = nn.LSTM(CFG.cnn_features, CFG.lstm_hidden, 2, batch_first=True) self.fc = nn.Linear(CFG.lstm_hidden, 1, bias=True) def forward(self, x): # x shape: BxTxCxHxW batch_size, timesteps, C, H, W = x.size() c_in = x.view(batch_size * timesteps, C, H, W) c_out = self.cnn(c_in) r_in = c_out.view(batch_size, timesteps, -1) output, (hn, cn) = self.rnn(r_in) out = self.fc(hn[-1]) return out # # Data Processing def load_dicom(path): dicom = pydicom.read_file(path) data = dicom.pixel_array data = data - np.min(data) if np.max(data) != 0: data = data / np.max(data) height, width = data.shape margin_h = int(height * CFG.center_crop) margin_w = int(width * CFG.center_crop) data = data[margin_h:-margin_h, margin_w:-margin_w] data = np.float32(cv2.resize(data, (CFG.img_size, CFG.img_size))) return torch.tensor(data) def load_dicom_line(path): t_paths = sorted( glob.glob(os.path.join(path, "*")), key=lambda x: int(x[:-4].split("-")[-1]), ) images = [] for filename in t_paths: data = load_dicom(filename) if data.max() == 0: continue images.append(data) return images def load_image(path): image = cv2.imread(path, 0) if image is None: return np.zeros((CFG.img_size, CFG.img_size)) image = cv2.resize(image, (CFG.img_size, CFG.img_size)) / 255 return torch.tensor(image) def get_valid_frames(t_paths): res = [] for path in t_paths: img = load_dicom(path) if img.view(-1).mean(0) != 0: res.append(path) return res def uniform_temporal_subsample(x, num_samples): """ Moddified from https://github.com/facebookresearch/pytorchvideo/blob/d7874f788bc00a7badfb4310a912f6e531ffd6d3/pytorchvideo/transforms/functional.py#L19 Args: x: input list num_samples: The number of equispaced samples to be selected Returns: Output list """ t = len(x) indices = torch.linspace(0, t - 
1, num_samples) indices = torch.clamp(indices, 0, t - 1).long() return [x[i] for i in indices] class TestDataRetriever(Dataset): def __init__(self, paths, transform=None): self.paths = paths self.transform = transform def __len__(self): return len(self.paths) def read_video(self, vid_paths): video = [load_dicom(path) for path in vid_paths] if len(video) == 0: video = torch.zeros(CFG.n_frames, CFG.img_size, CFG.img_size) else: video = torch.stack(video) # T * C * H * W # video = torch.transpose(video, 0, 1) # C * T * H * W return video def __getitem__(self, index): _id = self.paths[index] patient_path = f"../input/rsna-miccai-brain-tumor-radiogenomic-classification/test/{str(_id).zfill(5)}/" channels = [] for t in ["FLAIR", "T1w", "T1wCE", "T2w"]: t_paths = sorted( glob.glob(os.path.join(patient_path, t, "*")), key=lambda x: int(x[:-4].split("-")[-1]), ) num_samples = CFG.n_frames # t_paths = get_valid_frames(t_paths) if len(t_paths) < num_samples: in_frames_path = t_paths else: in_frames_path = uniform_temporal_subsample(t_paths, num_samples) channel = self.read_video(in_frames_path) if channel.shape[0] == 0: print("1 channel empty") channel = torch.zeros(num_samples, CFG.img_size, CFG.img_size) channels.append(channel) channels = torch.stack(channels).transpose(0, 1) return {"X": channels.float(), "id": _id} df = pd.read_csv( "../input/rsna-miccai-brain-tumor-radiogenomic-classification/train_labels.csv" ) df.head(10) # # Inference models = [] for i in range(1, CFG.n_fold + 1): model = Model() model.to(device) checkpoint = torch.load(f"../input/rnsa-21-cnn-lstm-train/best-model-{i}.pth") model.load_state_dict(checkpoint["model_state_dict"]) model.eval() models.append(model) submission = pd.read_csv( "../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv" ) test_data_retriever = TestDataRetriever(submission["BraTS21ID"].values) test_loader = torch_data.DataLoader( test_data_retriever, batch_size=4, shuffle=False, num_workers=8, ) y_pred = [] ids = [] for e, batch in enumerate(test_loader): print(f"{e}/{len(test_loader)}", end="\r") with torch.no_grad(): tmp_pred = np.zeros((batch["X"].shape[0],)) for model in models: tmp_res = ( torch.sigmoid(model(batch["X"].to(device))).cpu().numpy().squeeze() ) tmp_pred += tmp_res tmp_pred = tmp_pred / len(models) y_pred.extend(tmp_pred) ids.extend(batch["id"].numpy().tolist()) submission = pd.DataFrame({"BraTS21ID": ids, "MGMT_value": y_pred}) submission.to_csv("submission.csv", index=False) submission
false
2
2,345
0
2,518
2,345
69332838
<jupyter_start><jupyter_text>Netflix Dataset Kaggle dataset identifier: netflix-dataset <jupyter_code>import pandas as pd df = pd.read_csv('netflix-dataset/Netflix Dataset.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 7789 entries, 0 to 7788 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Show_Id 7789 non-null object 1 Category 7789 non-null object 2 Title 7789 non-null object 3 Director 5401 non-null object 4 Cast 7071 non-null object 5 Country 7282 non-null object 6 Release_Date 7779 non-null object 7 Rating 7782 non-null object 8 Duration 7789 non-null object 9 Type 7789 non-null object 10 Description 7789 non-null object dtypes: object(11) memory usage: 669.5+ KB <jupyter_text>Examples: { "Show_Id": "s1", "Category": "TV Show", "Title": "3%", "Director": null, "Cast": "Jo\u00e3o Miguel, Bianca Comparato, Michel Gomes, Rodolfo Valente, Vaneza Oliveira, Rafael Lozano, Viviane Porto, Mel Fronckowiak, Sergio Mamberti, Zez\u00e9 Motta, Celso Frateschi", "Country": "Brazil", "Release_Date": "August 14, 2020", "Rating": "TV-MA", "Duration": "4 Seasons", "Type": "International TV Shows, TV Dramas, TV Sci-Fi & Fantasy", "Description": "In a future where the elite inhabit an island paradise far from the crowded slums, you get one chance to join the 3% saved from squalor." } { "Show_Id": "s2", "Category": "Movie", "Title": "07:19", "Director": "Jorge Michel Grau", "Cast": "Demi\u00e1n Bichir, H\u00e9ctor Bonilla, Oscar Serrano, Azalia Ortiz, Octavio Michel, Carmen Beato", "Country": "Mexico", "Release_Date": "December 23, 2016", "Rating": "TV-MA", "Duration": "93 min", "Type": "Dramas, International Movies", "Description": "After a devastating earthquake hits Mexico City, trapped survivors from all walks of life wait to be rescued while trying desperately to stay alive." } { "Show_Id": "s3", "Category": "Movie", "Title": "23:59", "Director": "Gilbert Chan", "Cast": "Tedd Chan, Stella Chung, Henley Hii, Lawrence Koh, Tommy Kuan, Josh Lai, Mark Lee, Susan Leong, Benjamin Lim", "Country": "Singapore", "Release_Date": "December 20, 2018", "Rating": "R", "Duration": "78 min", "Type": "Horror Movies, International Movies", "Description": "When an army recruit is found dead, his fellow soldiers are forced to confront a terrifying secret that's haunting their jungle island training camp." } { "Show_Id": "s4", "Category": "Movie", "Title": "9", "Director": "Shane Acker", "Cast": "Elijah Wood, John C. Reilly, Jennifer Connelly, Christopher Plummer, Crispin Glover, Martin Landau, Fred Tatasciore, Alan Oppenheimer, Tom Kane", "Country": "United States", "Release_Date": "November 16, 2017", "Rating": "PG-13", "Duration": "80 min", "Type": "Action & Adventure, Independent Movies, Sci-Fi & Fantasy", "Description": "In a postapocalyptic world, rag-doll robots hide in fear from dangerous machines out to exterminate them, until a brave newcomer joins the group." } <jupyter_script># # Greetings!! This notebook is to do EDA and to solve questions on Netflix dataset import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ![](https://i.pinimg.com/originals/f6/b1/1b/f6b11bd53411d94338117381cf9a9b9b.gif) import numpy as np import pandas as pd import matplotlib.pyplot as plt pd.set_option("display.max_columns", None) pd.set_option("display.max_rows", None) df = pd.read_csv("../input/netflix-dataset/Netflix Dataset.csv") df.head(2) df.tail(2) df.shape df.size df.ndim df.index df.columns df[df.duplicated()] df.head(2) df["Category"].unique() df["Director"].unique() df["Country"].unique() df["Rating"].unique() df["Duration"].unique() # # Task:- # # Task.1. Is there any Duplicate Record in this dataset ? If yes, then remove the duplicate records. df[df.duplicated()] df.drop_duplicates(inplace=True) df[df.duplicated()] # No more duplicate values # # Task.2. Is there any Null Value present in any column ? Show with Heat-map. df.isnull().sum() import seaborn as sns sns.heatmap(df.isnull()) # # Questions:- # # Q.1. For 'House of Cards', what is the Show Id and Who is the Director of this show ? df.head(2) # df[df["Title"]=="House of Cards"] # df[df["Title"].isin(["House of Cards"])] df[df["Title"].str.contains("House of Cards")] # # Q.2. In which year highest number of the TV Shows & Movies were released ? Show with Bar Graph. df.head(2) df.dtypes df["Release_Date"] = pd.to_datetime(df["Release_Date"]) df.head(2) df["Release_Date"].dt.year.value_counts() df["Release_Date"].dt.year.value_counts().plot(kind="bar") # # Q.3. How many Movies & TV Shows are in the dataset ? Show with Bar Graph. df.dtypes df["Category"].value_counts().plot(kind="bar") sns.countplot(df["Category"]) # # Q.4. Show all the Movies that were released in year 2020. df.head(2) df["Year"] = df["Release_Date"].dt.year df.head(2) df[(df["Category"] == "Movie") & (df["Year"] == 2020)].head(5) # # Q.5. Show only the Titles of all TV Shows that were released in India only. df.head(2) df[(df["Country"] == "India") & (df["Category"] == "TV Show")]["Title"].head(5) # # Q.6. Show Top 10 Directors, who gave the highest number of TV Shows & Movies to Netflix ? df.head(2) df["Director"].value_counts().head(10) # # Q.7. Show all the Records, where "Category is Movie and Type is Comedies" or "Country is United Kingdom". df.head(3) df[ (df["Category"] == "Movie") & (df["Type"] == "Comedies") | (df["Country"] == "United Kingdom") ].head(5) # # Q.8. In how many movies/shows, Tom Cruise was cast ? df.head(3) df[df["Cast"].str.contains("Tom Cruise", na=False)] # # Q.9. What are the different Ratings defined by Netflix ? df.head(2) df["Rating"].unique() df["Rating"].nunique() # # Q.9.1. How many Movies got the 'TV-14' rating, in Canada ? df.head(2) df[ (df["Rating"] == "TV-14") & (df["Country"] == "Canada") & (df["Category"] == "Movie") ] # # Q.9.2. How many TV Show got the 'R' rating, after year 2018 ? df[(df["Category"] == "TV Show") & (df["Rating"] == "R") & (df["Year"] > 2018)] # # Q.10. What is the maximum duration of a Movie/Show on Netflix ? df.head(2) df["Duration"].unique() df["Duration"].dtypes df[["Minutes", "Units"]] = df["Duration"].str.split(" ", expand=True) df.head(2) df["Minutes"].max() df["Minutes"].min() # # Q.11. How can we sort the dataset by Year ? df.sort_values(by="Year", ascending=True).head(5) # # Q.12. Find all the instances where : Category is 'Movie' and Type is 'Dramas' or Category is 'TV Show' & Type is 'Kids' TV'. df[ (df["Category"] == "Movie") & (df["Type"] == "Dramas") | (df["Category"] == "TV Show") & (df["Type"] == "Kids' TV") ]
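One caveat in the Q.10 step above: after `str.split`, the new `Minutes` column still holds strings, so `.max()`/`.min()` compare text rather than numbers, and movie minutes get mixed with TV-show season counts. A small sketch of a numeric version, assuming the `Duration` values follow the "93 min" / "4 Seasons" pattern shown in the dataset description:

import pandas as pd

df = pd.read_csv("../input/netflix-dataset/Netflix Dataset.csv")
parts = df["Duration"].str.split(" ", expand=True)
df["Minutes"] = pd.to_numeric(parts[0], errors="coerce")
df["Units"] = parts[1]

movies = df[df["Units"] == "min"]  # keep only minute-based durations
print("Longest movie:", movies["Minutes"].max(), "min")
print("Shortest movie:", movies["Minutes"].min(), "min")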
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332838.ipynb
netflix-dataset
sonalisingh1411
[{"Id": 69332838, "ScriptId": 18927570, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3288534, "CreationDate": "07/29/2021 16:50:22", "VersionNumber": 1.0, "Title": "Project 5 Data analysis on Netflix Dataset", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 189.0, "LinesInsertedFromPrevious": 189.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92371310, "KernelVersionId": 69332838, "SourceDatasetVersionId": 2477104}]
[{"Id": 2477104, "DatasetId": 1499119, "DatasourceVersionId": 2519622, "CreatorUserId": 3288534, "LicenseName": "Unknown", "CreationDate": "07/29/2021 15:22:21", "VersionNumber": 1.0, "Title": "Netflix Dataset", "Slug": "netflix-dataset", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1499119, "CreatorUserId": 3288534, "OwnerUserId": 3288534.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2477104.0, "CurrentDatasourceVersionId": 2519622.0, "ForumId": 1518848, "Type": 2, "CreationDate": "07/29/2021 15:22:21", "LastActivityDate": "07/29/2021", "TotalViews": 1390, "TotalDownloads": 131, "TotalVotes": 7, "TotalKernels": 7}]
[{"Id": 3288534, "UserName": "sonalisingh1411", "DisplayName": "\ud83d\ude80\ud835\udc12\ud835\udc28\ud835\udc27\ud835\udc1a\ud835\udc25\ud835\udc22 \ud835\udc12\ud835\udc22\ud835\udc27\ud835\udc20\ud835\udc21", "RegisterDate": "05/29/2019", "PerformanceTier": 3}]
# # Greetings!! This notebook is to do EDA and to solve questions on Netflix dataset import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ![](https://i.pinimg.com/originals/f6/b1/1b/f6b11bd53411d94338117381cf9a9b9b.gif) import numpy as np import pandas as pd import matplotlib.pyplot as plt pd.set_option("display.max_columns", None) pd.set_option("display.max_rows", None) df = pd.read_csv("../input/netflix-dataset/Netflix Dataset.csv") df.head(2) df.tail(2) df.shape df.size df.ndim df.index df.columns df[df.duplicated()] df.head(2) df["Category"].unique() df["Director"].unique() df["Country"].unique() df["Rating"].unique() df["Duration"].unique() # # Task:- # # Task.1. Is there any Duplicate Record in this dataset ? If yes, then remove the duplicate records. df[df.duplicated()] df.drop_duplicates(inplace=True) df[df.duplicated()] # No more duplicate values # # Task.2. Is there any Null Value present in any column ? Show with Heat-map. df.isnull().sum() import seaborn as sns sns.heatmap(df.isnull()) # # Questions:- # # Q.1. For 'House of Cards', what is the Show Id and Who is the Director of this show ? df.head(2) # df[df["Title"]=="House of Cards"] # df[df["Title"].isin(["House of Cards"])] df[df["Title"].str.contains("House of Cards")] # # Q.2. In which year highest number of the TV Shows & Movies were released ? Show with Bar Graph. df.head(2) df.dtypes df["Release_Date"] = pd.to_datetime(df["Release_Date"]) df.head(2) df["Release_Date"].dt.year.value_counts() df["Release_Date"].dt.year.value_counts().plot(kind="bar") # # Q.3. How many Movies & TV Shows are in the dataset ? Show with Bar Graph. df.dtypes df["Category"].value_counts().plot(kind="bar") sns.countplot(df["Category"]) # # Q.4. Show all the Movies that were released in year 2020. df.head(2) df["Year"] = df["Release_Date"].dt.year df.head(2) df[(df["Category"] == "Movie") & (df["Year"] == 2020)].head(5) # # Q.5. Show only the Titles of all TV Shows that were released in India only. df.head(2) df[(df["Country"] == "India") & (df["Category"] == "TV Show")]["Title"].head(5) # # Q.6. Show Top 10 Directors, who gave the highest number of TV Shows & Movies to Netflix ? df.head(2) df["Director"].value_counts().head(10) # # Q.7. Show all the Records, where "Category is Movie and Type is Comedies" or "Country is United Kingdom". df.head(3) df[ (df["Category"] == "Movie") & (df["Type"] == "Comedies") | (df["Country"] == "United Kingdom") ].head(5) # # Q.8. In how many movies/shows, Tom Cruise was cast ? df.head(3) df[df["Cast"].str.contains("Tom Cruise", na=False)] # # Q.9. What are the different Ratings defined by Netflix ? df.head(2) df["Rating"].unique() df["Rating"].nunique() # # Q.9.1. How many Movies got the 'TV-14' rating, in Canada ? df.head(2) df[ (df["Rating"] == "TV-14") & (df["Country"] == "Canada") & (df["Category"] == "Movie") ] # # Q.9.2. How many TV Show got the 'R' rating, after year 2018 ? df[(df["Category"] == "TV Show") & (df["Rating"] == "R") & (df["Year"] > 2018)] # # Q.10. What is the maximum duration of a Movie/Show on Netflix ? df.head(2) df["Duration"].unique() df["Duration"].dtypes df[["Minutes", "Units"]] = df["Duration"].str.split(" ", expand=True) df.head(2) df["Minutes"].max() df["Minutes"].min() # # Q.11. How can we sort the dataset by Year ? df.sort_values(by="Year", ascending=True).head(5) # # Q.12. Find all the instances where : Category is 'Movie' and Type is 'Dramas' or Category is 'TV Show' & Type is 'Kids' TV'. df[ (df["Category"] == "Movie") & (df["Type"] == "Dramas") | (df["Category"] == "TV Show") & (df["Type"] == "Kids' TV") ]
[{"netflix-dataset/Netflix Dataset.csv": {"column_names": "[\"Show_Id\", \"Category\", \"Title\", \"Director\", \"Cast\", \"Country\", \"Release_Date\", \"Rating\", \"Duration\", \"Type\", \"Description\"]", "column_data_types": "{\"Show_Id\": \"object\", \"Category\": \"object\", \"Title\": \"object\", \"Director\": \"object\", \"Cast\": \"object\", \"Country\": \"object\", \"Release_Date\": \"object\", \"Rating\": \"object\", \"Duration\": \"object\", \"Type\": \"object\", \"Description\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 7789 entries, 0 to 7788\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Show_Id 7789 non-null object\n 1 Category 7789 non-null object\n 2 Title 7789 non-null object\n 3 Director 5401 non-null object\n 4 Cast 7071 non-null object\n 5 Country 7282 non-null object\n 6 Release_Date 7779 non-null object\n 7 Rating 7782 non-null object\n 8 Duration 7789 non-null object\n 9 Type 7789 non-null object\n 10 Description 7789 non-null object\ndtypes: object(11)\nmemory usage: 669.5+ KB\n", "summary": "{\"Show_Id\": {\"count\": 7789, \"unique\": 7787, \"top\": \"s6621\", \"freq\": 2}, \"Category\": {\"count\": 7789, \"unique\": 2, \"top\": \"Movie\", \"freq\": 5379}, \"Title\": {\"count\": 7789, \"unique\": 7787, \"top\": \"The Lost Okoroshi\", \"freq\": 2}, \"Director\": {\"count\": 5401, \"unique\": 4050, \"top\": \"Ra\\u00fal Campos, Jan Suter\", \"freq\": 18}, \"Cast\": {\"count\": 7071, \"unique\": 6831, \"top\": \"David Attenborough\", \"freq\": 18}, \"Country\": {\"count\": 7282, \"unique\": 681, \"top\": \"United States\", \"freq\": 2556}, \"Release_Date\": {\"count\": 7779, \"unique\": 1565, \"top\": \"January 1, 2020\", \"freq\": 118}, \"Rating\": {\"count\": 7782, \"unique\": 14, \"top\": \"TV-MA\", \"freq\": 2865}, \"Duration\": {\"count\": 7789, \"unique\": 216, \"top\": \"1 Season\", \"freq\": 1608}, \"Type\": {\"count\": 7789, \"unique\": 492, \"top\": \"Documentaries\", \"freq\": 334}, \"Description\": {\"count\": 7789, \"unique\": 7769, \"top\": \"Multiple women report their husbands as missing but when it appears they are looking for the same man, a police officer traces their cryptic connection.\", \"freq\": 3}}", "examples": "{\"Show_Id\":{\"0\":\"s1\",\"1\":\"s2\",\"2\":\"s3\",\"3\":\"s4\"},\"Category\":{\"0\":\"TV Show\",\"1\":\"Movie\",\"2\":\"Movie\",\"3\":\"Movie\"},\"Title\":{\"0\":\"3%\",\"1\":\"07:19\",\"2\":\"23:59\",\"3\":\"9\"},\"Director\":{\"0\":null,\"1\":\"Jorge Michel Grau\",\"2\":\"Gilbert Chan\",\"3\":\"Shane Acker\"},\"Cast\":{\"0\":\"Jo\\u00e3o Miguel, Bianca Comparato, Michel Gomes, Rodolfo Valente, Vaneza Oliveira, Rafael Lozano, Viviane Porto, Mel Fronckowiak, Sergio Mamberti, Zez\\u00e9 Motta, Celso Frateschi\",\"1\":\"Demi\\u00e1n Bichir, H\\u00e9ctor Bonilla, Oscar Serrano, Azalia Ortiz, Octavio Michel, Carmen Beato\",\"2\":\"Tedd Chan, Stella Chung, Henley Hii, Lawrence Koh, Tommy Kuan, Josh Lai, Mark Lee, Susan Leong, Benjamin Lim\",\"3\":\"Elijah Wood, John C. 
Reilly, Jennifer Connelly, Christopher Plummer, Crispin Glover, Martin Landau, Fred Tatasciore, Alan Oppenheimer, Tom Kane\"},\"Country\":{\"0\":\"Brazil\",\"1\":\"Mexico\",\"2\":\"Singapore\",\"3\":\"United States\"},\"Release_Date\":{\"0\":\"August 14, 2020\",\"1\":\"December 23, 2016\",\"2\":\"December 20, 2018\",\"3\":\"November 16, 2017\"},\"Rating\":{\"0\":\"TV-MA\",\"1\":\"TV-MA\",\"2\":\"R\",\"3\":\"PG-13\"},\"Duration\":{\"0\":\"4 Seasons\",\"1\":\"93 min\",\"2\":\"78 min\",\"3\":\"80 min\"},\"Type\":{\"0\":\"International TV Shows, TV Dramas, TV Sci-Fi & Fantasy\",\"1\":\"Dramas, International Movies\",\"2\":\"Horror Movies, International Movies\",\"3\":\"Action & Adventure, Independent Movies, Sci-Fi & Fantasy\"},\"Description\":{\"0\":\"In a future where the elite inhabit an island paradise far from the crowded slums, you get one chance to join the 3% saved from squalor.\",\"1\":\"After a devastating earthquake hits Mexico City, trapped survivors from all walks of life wait to be rescued while trying desperately to stay alive.\",\"2\":\"When an army recruit is found dead, his fellow soldiers are forced to confront a terrifying secret that's haunting their jungle island training camp.\",\"3\":\"In a postapocalyptic world, rag-doll robots hide in fear from dangerous machines out to exterminate them, until a brave newcomer joins the group.\"}}"}}]
true
1
<start_data_description><data_path>netflix-dataset/Netflix Dataset.csv: <column_names> ['Show_Id', 'Category', 'Title', 'Director', 'Cast', 'Country', 'Release_Date', 'Rating', 'Duration', 'Type', 'Description'] <column_types> {'Show_Id': 'object', 'Category': 'object', 'Title': 'object', 'Director': 'object', 'Cast': 'object', 'Country': 'object', 'Release_Date': 'object', 'Rating': 'object', 'Duration': 'object', 'Type': 'object', 'Description': 'object'} <dataframe_Summary> {'Show_Id': {'count': 7789, 'unique': 7787, 'top': 's6621', 'freq': 2}, 'Category': {'count': 7789, 'unique': 2, 'top': 'Movie', 'freq': 5379}, 'Title': {'count': 7789, 'unique': 7787, 'top': 'The Lost Okoroshi', 'freq': 2}, 'Director': {'count': 5401, 'unique': 4050, 'top': 'Raúl Campos, Jan Suter', 'freq': 18}, 'Cast': {'count': 7071, 'unique': 6831, 'top': 'David Attenborough', 'freq': 18}, 'Country': {'count': 7282, 'unique': 681, 'top': 'United States', 'freq': 2556}, 'Release_Date': {'count': 7779, 'unique': 1565, 'top': 'January 1, 2020', 'freq': 118}, 'Rating': {'count': 7782, 'unique': 14, 'top': 'TV-MA', 'freq': 2865}, 'Duration': {'count': 7789, 'unique': 216, 'top': '1 Season', 'freq': 1608}, 'Type': {'count': 7789, 'unique': 492, 'top': 'Documentaries', 'freq': 334}, 'Description': {'count': 7789, 'unique': 7769, 'top': 'Multiple women report their husbands as missing but when it appears they are looking for the same man, a police officer traces their cryptic connection.', 'freq': 3}} <dataframe_info> RangeIndex: 7789 entries, 0 to 7788 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Show_Id 7789 non-null object 1 Category 7789 non-null object 2 Title 7789 non-null object 3 Director 5401 non-null object 4 Cast 7071 non-null object 5 Country 7282 non-null object 6 Release_Date 7779 non-null object 7 Rating 7782 non-null object 8 Duration 7789 non-null object 9 Type 7789 non-null object 10 Description 7789 non-null object dtypes: object(11) memory usage: 669.5+ KB <some_examples> {'Show_Id': {'0': 's1', '1': 's2', '2': 's3', '3': 's4'}, 'Category': {'0': 'TV Show', '1': 'Movie', '2': 'Movie', '3': 'Movie'}, 'Title': {'0': '3%', '1': '07:19', '2': '23:59', '3': '9'}, 'Director': {'0': None, '1': 'Jorge Michel Grau', '2': 'Gilbert Chan', '3': 'Shane Acker'}, 'Cast': {'0': 'João Miguel, Bianca Comparato, Michel Gomes, Rodolfo Valente, Vaneza Oliveira, Rafael Lozano, Viviane Porto, Mel Fronckowiak, Sergio Mamberti, Zezé Motta, Celso Frateschi', '1': 'Demián Bichir, Héctor Bonilla, Oscar Serrano, Azalia Ortiz, Octavio Michel, Carmen Beato', '2': 'Tedd Chan, Stella Chung, Henley Hii, Lawrence Koh, Tommy Kuan, Josh Lai, Mark Lee, Susan Leong, Benjamin Lim', '3': 'Elijah Wood, John C. 
Reilly, Jennifer Connelly, Christopher Plummer, Crispin Glover, Martin Landau, Fred Tatasciore, Alan Oppenheimer, Tom Kane'}, 'Country': {'0': 'Brazil', '1': 'Mexico', '2': 'Singapore', '3': 'United States'}, 'Release_Date': {'0': 'August 14, 2020', '1': 'December 23, 2016', '2': 'December 20, 2018', '3': 'November 16, 2017'}, 'Rating': {'0': 'TV-MA', '1': 'TV-MA', '2': 'R', '3': 'PG-13'}, 'Duration': {'0': '4 Seasons', '1': '93 min', '2': '78 min', '3': '80 min'}, 'Type': {'0': 'International TV Shows, TV Dramas, TV Sci-Fi & Fantasy', '1': 'Dramas, International Movies', '2': 'Horror Movies, International Movies', '3': 'Action & Adventure, Independent Movies, Sci-Fi & Fantasy'}, 'Description': {'0': 'In a future where the elite inhabit an island paradise far from the crowded slums, you get one chance to join the 3% saved from squalor.', '1': 'After a devastating earthquake hits Mexico City, trapped survivors from all walks of life wait to be rescued while trying desperately to stay alive.', '2': "When an army recruit is found dead, his fellow soldiers are forced to confront a terrifying secret that's haunting their jungle island training camp.", '3': 'In a postapocalyptic world, rag-doll robots hide in fear from dangerous machines out to exterminate them, until a brave newcomer joins the group.'}} <end_description>
1,488
0
2,626
1,488
69332562
import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) train = pd.read_csv("../input/traindataset/train.csv") test = pd.read_csv("../input/testdataset/test.csv") train.head() train.isnull().sum() test = test.iloc[:299] train = train.iloc[:664] files = [train, test] t = ["Version", "Album_type", "Album", "Artists", "Labels", "Country"] for j in files: for i in t: j[i] = j[i].fillna("none") print(train.isnull().sum()) print(test.isnull().sum()) Energy = train.Energy.mean() Dancebility = train.Dancebility.mean() Happiness = train.Happiness.mean() train["Energy"] = train["Energy"].fillna(Energy) train["Dancebility"] = train["Dancebility"].fillna(Dancebility) train["Happiness"] = train["Happiness"].fillna(Happiness) test.dropna(subset=["Vocal "], inplace=True) train.isnull().sum() # a = ['Artists', 'Track', 'Version', 'Album', 'Album_type', 'Labels', 'Key', 'Country', 'Vocal '] def binary(x): if x == "M": return 0 elif x == "F": return 1 train["Vocal "] = train["Vocal "].apply(binary) train.isnull().sum() train.head() def tone(x): x = x.split() if x[1] == "Minor": return 0 else: return 1 train["Tone"] = list(map(lambda x: tone(x), train["Key"])) train.head() train["Key"].unique() def key(x): x = x.split() x = x[0] if x == "D♭": return "C#" elif x == "E♭": return "D#" elif x == "G♭": return "F#" elif x == "A♭": return "G#" elif x == "B♭": return "A#" return x train["Key"] = train["Key"].apply(key) train["Key"] train["Key"].unique() train.head() train["Country"] def continents(x): if x in [ "SWE", "GB", "UA", "RUS", "IRL", "NED", "ITL", "FR", "DE", "FIN", "DK", "POR", ]: return "Europe" elif x in ["CA", "USA"]: return "North America" elif x == "AU": return "Australia" elif x in ["KR", "JP", "KZ", "AZE"]: return "Asia" else: return "else" train["Country"] = train["Country"].apply(continents) Genres = [ "Soul", "Country/Folk", "Metal", "Rock", "Pop", "Jazz", "Classic", "Dance", "indie", "rap", "reggae", "other", ] for i in Genres: train[i] = 0 index = -1 for j in train["Artists_Genres"]: j = j.split("|") index += 1 for i in j: if i in ["country", "folk"]: train.loc[index, "Country/Folk"] = 1 elif i in ["folkmetal", "metal", "numetal", "classicmetal", "epicmetal"]: train.loc[index, "Metal"] = 1 elif i == "soul": train.loc[index, "Soul"] = 1 elif i in ["hardrock", "rock", "allrock", "folkrock", "prog", "rnr"]: train.loc[index, "Rock"] = 1 elif i == "pop": train.loc[index, "Pop"] = 1 elif i in ["jazz", "tradjazz", "conjazz"]: train.loc[index, "Jazz"] = 1 elif i in ["classicalmasterpieces", "classical"]: train.loc[index, "Classic"] = 1 elif i in ["dance", "house"]: train.loc[index, "Dance"] = 1 elif i in ["alternative", "indie", "local-indie"]: train.loc[index, "indie"] = 1 elif i in ["rap", "foreignrap"]: train.loc[index, "rap"] = 1 elif i in ["reggae", "reggaeton"]: train.loc[index, "reggae"] = 1 else: train.loc[index, "other"] = 1 train["Release_year"].unique() def year(x): if x < 1990: return 1980 elif x < 2000: return 1990 elif x < 2010: return 2000 elif x < 2020: return 2010 elif x < 2030: return 2020 else: return "1970 or older" train["Release_year"] = train["Release_year"].apply(year) train["Release_year"].unique() train = pd.get_dummies(train, columns=["Version", "Album_type", "Key", "Country"]) train.head() train = pd.get_dummies(train, columns=["Album"]) import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.preprocessing import StandardScaler, normalize 
from sklearn.decomposition import PCA from sklearn.metrics import silhouette_score Albums = list(train.columns)[32:] Album_data = train.loc[:, Albums] scaler = StandardScaler() scaled_train = scaler.fit_transform(Album_data) # Normalizing the Data normalized_train = normalize(scaled_train) # Converting the numpy array into a pandas DataFrame normalized_train = pd.DataFrame(normalized_train) # Reducing the dimensions of the data pca = PCA(n_components=2) X_principal = pca.fit_transform(normalized_train) X_principal = pd.DataFrame(X_principal) X_principal.columns = ["x", "y"] X_principal.head(2) sse = {} for k in range(1, 10): kmeans = KMeans(n_clusters=k, max_iter=1000).fit(X_principal) sse[ k ] = ( kmeans.inertia_ ) # Inertia: Sum of distances of samples to their closest cluster center plt.figure() plt.plot(list(sse.keys()), list(sse.values())) plt.xlabel("Number of cluster") plt.ylabel("SSE") plt.show() silhouette_scores = [] for n_cluster in range(2, 8): silhouette_scores.append( silhouette_score( X_principal, KMeans(n_clusters=n_cluster).fit_predict(X_principal) ) ) # Plotting a bar graph to compare the results k = [2, 3, 4, 5, 6, 7] plt.bar(k, silhouette_scores) plt.xlabel("Number of clusters", fontsize=10) plt.ylabel("Silhouette Score", fontsize=10) plt.show() kmeans = KMeans(n_clusters=3) kmeans.fit(X_principal) # Visualizing the clustering plt.scatter( X_principal["x"], X_principal["y"], c=KMeans(n_clusters=3).fit_predict(X_principal), cmap=plt.cm.winter, ) plt.show() train["Artist_1"] = X_principal["x"] train["Artist_2"] = X_principal["y"] train = train.drop(columns=Albums) train = train.drop(columns="Artists_Genres") train.head() for i in train["Artists"]: if i is str: i = i.split("|") for j in i: train[j] = 0 index = -1 for j in train["Artists"]: j = j.split("|") index += 1 for i in j: train.loc[index, i] = 1 Artists = list(train.columns)[32:] for i in Artists: train[i] = train[i].fillna(0) train["Avatar"] from sklearn.model_selection import train_test_split X_train2, X_test2, y_train2, y_test2 = train_test_split(X_train, y_train, test_size=0.3) X_train2 = X_train.iloc[:664] y_train2 = y_train.iloc[:664] X_test2 = X_test2.iloc[:299] y_test2 = y_test2.iloc[:299] from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() scaler.fit(X_train) X_train = pd.DataFrame( data=scaler.transform(X_train), columns=X_train.columns, index=X_train.index ) scaler.fit(X_test) X_test = pd.DataFrame( data=scaler.transform(X_test), columns=X_test.columns, index=X_test.index ) X_train.head() X_train = X_train.iloc[:664] y_train = y_train.iloc[:664] # # **SVC** from sklearn.svm import SVC svclassifier = SVC(kernel="linear") svclassifier.fit(X_train2, y_train2) y_pred = svclassifier.predict(X_test2) from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(y_test2, y_pred)) print(classification_report(y_test2, y_pred)) # # **Decision Tree** from sklearn.tree import DecisionTreeClassifier decision_tree = DecisionTreeClassifier() decision_tree.fit(X_train2, y_train2) tree_pred = decision_tree.predict(X_test2) print(confusion_matrix(y_test2, tree_pred)) print(classification_report(y_test2, tree_pred)) # # **Random Forest** from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier() rf.fit(X_train2, y_train2) rf_pred = rf.predict(X_test2) print(confusion_matrix(y_test2, rf_pred)) print(classification_report(y_test2, rf_pred)) # # **KNeighbors** from sklearn.neighbors import KNeighborsClassifier KN = KNeighborsClassifier() KN.fit(X_train2, 
y_train2) KN_pred = KN.predict(X_test2) KN_pred print(confusion_matrix(y_test2, KN_pred)) print(classification_report(y_test2, KN_pred)) train_columns = train.columns.values train_columns m = ["classical", "rap", "jazz", "metal", "reggae"] for j in m: for i in train_columns: if i.find(j) != -1: train[j] = 0 del train[i] train.columns.values y_train.value_counts() rf = RandomForestClassifier() rf.fit(X_train, y_train) rf_pred1 = rf.predict(X_test) rf_pred1 decision_tree = DecisionTreeClassifier() decision_tree.fit(X_train, y_train) tree_pred1 = decision_tree.predict(X_test) tree_pred1
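A side note on the MinMaxScaler block above: it refits the scaler on `X_test`, so the train and test frames end up on slightly different scales. The more common pattern is to fit on the training split only and reuse that fitted scaler for the test split. A minimal self-contained sketch (toy values, borrowing two of the notebook's numeric column names):

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

toy = pd.DataFrame({"Energy": [10, 20, 30, 40, 50, 60],
                    "Happiness": [1, 3, 5, 7, 9, 11]})
X_tr, X_te = train_test_split(toy, test_size=0.33, random_state=0)

scaler = MinMaxScaler().fit(X_tr)  # fit on the training split only
X_tr = pd.DataFrame(scaler.transform(X_tr), columns=toy.columns, index=X_tr.index)
X_te = pd.DataFrame(scaler.transform(X_te), columns=toy.columns, index=X_te.index)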
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332562.ipynb
null
null
[{"Id": 69332562, "ScriptId": 18833063, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7932348, "CreationDate": "07/29/2021 16:46:10", "VersionNumber": 6.0, "Title": "Music", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 336.0, "LinesInsertedFromPrevious": 221.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 115.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) train = pd.read_csv("../input/traindataset/train.csv") test = pd.read_csv("../input/testdataset/test.csv") train.head() train.isnull().sum() test = test.iloc[:299] train = train.iloc[:664] files = [train, test] t = ["Version", "Album_type", "Album", "Artists", "Labels", "Country"] for j in files: for i in t: j[i] = j[i].fillna("none") print(train.isnull().sum()) print(test.isnull().sum()) Energy = train.Energy.mean() Dancebility = train.Dancebility.mean() Happiness = train.Happiness.mean() train["Energy"] = train["Energy"].fillna(Energy) train["Dancebility"] = train["Dancebility"].fillna(Dancebility) train["Happiness"] = train["Happiness"].fillna(Happiness) test.dropna(subset=["Vocal "], inplace=True) train.isnull().sum() # a = ['Artists', 'Track', 'Version', 'Album', 'Album_type', 'Labels', 'Key', 'Country', 'Vocal '] def binary(x): if x == "M": return 0 elif x == "F": return 1 train["Vocal "] = train["Vocal "].apply(binary) train.isnull().sum() train.head() def tone(x): x = x.split() if x[1] == "Minor": return 0 else: return 1 train["Tone"] = list(map(lambda x: tone(x), train["Key"])) train.head() train["Key"].unique() def key(x): x = x.split() x = x[0] if x == "D♭": return "C#" elif x == "E♭": return "D#" elif x == "G♭": return "F#" elif x == "A♭": return "G#" elif x == "B♭": return "A#" return x train["Key"] = train["Key"].apply(key) train["Key"] train["Key"].unique() train.head() train["Country"] def continents(x): if x in [ "SWE", "GB", "UA", "RUS", "IRL", "NED", "ITL", "FR", "DE", "FIN", "DK", "POR", ]: return "Europe" elif x in ["CA", "USA"]: return "North America" elif x == "AU": return "Australia" elif x in ["KR", "JP", "KZ", "AZE"]: return "Asia" else: return "else" train["Country"] = train["Country"].apply(continents) Genres = [ "Soul", "Country/Folk", "Metal", "Rock", "Pop", "Jazz", "Classic", "Dance", "indie", "rap", "reggae", "other", ] for i in Genres: train[i] = 0 index = -1 for j in train["Artists_Genres"]: j = j.split("|") index += 1 for i in j: if i in ["country", "folk"]: train.loc[index, "Country/Folk"] = 1 elif i in ["folkmetal", "metal", "numetal", "classicmetal", "epicmetal"]: train.loc[index, "Metal"] = 1 elif i == "soul": train.loc[index, "Soul"] = 1 elif i in ["hardrock", "rock", "allrock", "folkrock", "prog", "rnr"]: train.loc[index, "Rock"] = 1 elif i == "pop": train.loc[index, "Pop"] = 1 elif i in ["jazz", "tradjazz", "conjazz"]: train.loc[index, "Jazz"] = 1 elif i in ["classicalmasterpieces", "classical"]: train.loc[index, "Classic"] = 1 elif i in ["dance", "house"]: train.loc[index, "Dance"] = 1 elif i in ["alternative", "indie", "local-indie"]: train.loc[index, "indie"] = 1 elif i in ["rap", "foreignrap"]: train.loc[index, "rap"] = 1 elif i in ["reggae", "reggaeton"]: train.loc[index, "reggae"] = 1 else: train.loc[index, "other"] = 1 train["Release_year"].unique() def year(x): if x < 1990: return 1980 elif x < 2000: return 1990 elif x < 2010: return 2000 elif x < 2020: return 2010 elif x < 2030: return 2020 else: return "1970 or older" train["Release_year"] = train["Release_year"].apply(year) train["Release_year"].unique() train = pd.get_dummies(train, columns=["Version", "Album_type", "Key", "Country"]) train.head() train = pd.get_dummies(train, columns=["Album"]) import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.preprocessing import StandardScaler, normalize 
from sklearn.decomposition import PCA from sklearn.metrics import silhouette_score Albums = list(train.columns)[32:] Album_data = train.loc[:, Albums] scaler = StandardScaler() scaled_train = scaler.fit_transform(Album_data) # Normalizing the Data normalized_train = normalize(scaled_train) # Converting the numpy array into a pandas DataFrame normalized_train = pd.DataFrame(normalized_train) # Reducing the dimensions of the data pca = PCA(n_components=2) X_principal = pca.fit_transform(normalized_train) X_principal = pd.DataFrame(X_principal) X_principal.columns = ["x", "y"] X_principal.head(2) sse = {} for k in range(1, 10): kmeans = KMeans(n_clusters=k, max_iter=1000).fit(X_principal) sse[ k ] = ( kmeans.inertia_ ) # Inertia: Sum of distances of samples to their closest cluster center plt.figure() plt.plot(list(sse.keys()), list(sse.values())) plt.xlabel("Number of cluster") plt.ylabel("SSE") plt.show() silhouette_scores = [] for n_cluster in range(2, 8): silhouette_scores.append( silhouette_score( X_principal, KMeans(n_clusters=n_cluster).fit_predict(X_principal) ) ) # Plotting a bar graph to compare the results k = [2, 3, 4, 5, 6, 7] plt.bar(k, silhouette_scores) plt.xlabel("Number of clusters", fontsize=10) plt.ylabel("Silhouette Score", fontsize=10) plt.show() kmeans = KMeans(n_clusters=3) kmeans.fit(X_principal) # Visualizing the clustering plt.scatter( X_principal["x"], X_principal["y"], c=KMeans(n_clusters=3).fit_predict(X_principal), cmap=plt.cm.winter, ) plt.show() train["Artist_1"] = X_principal["x"] train["Artist_2"] = X_principal["y"] train = train.drop(columns=Albums) train = train.drop(columns="Artists_Genres") train.head() for i in train["Artists"]: if i is str: i = i.split("|") for j in i: train[j] = 0 index = -1 for j in train["Artists"]: j = j.split("|") index += 1 for i in j: train.loc[index, i] = 1 Artists = list(train.columns)[32:] for i in Artists: train[i] = train[i].fillna(0) train["Avatar"] from sklearn.model_selection import train_test_split X_train2, X_test2, y_train2, y_test2 = train_test_split(X_train, y_train, test_size=0.3) X_train2 = X_train.iloc[:664] y_train2 = y_train.iloc[:664] X_test2 = X_test2.iloc[:299] y_test2 = y_test2.iloc[:299] from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() scaler.fit(X_train) X_train = pd.DataFrame( data=scaler.transform(X_train), columns=X_train.columns, index=X_train.index ) scaler.fit(X_test) X_test = pd.DataFrame( data=scaler.transform(X_test), columns=X_test.columns, index=X_test.index ) X_train.head() X_train = X_train.iloc[:664] y_train = y_train.iloc[:664] # # **SVC** from sklearn.svm import SVC svclassifier = SVC(kernel="linear") svclassifier.fit(X_train2, y_train2) y_pred = svclassifier.predict(X_test2) from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(y_test2, y_pred)) print(classification_report(y_test2, y_pred)) # # **Decision Tree** from sklearn.tree import DecisionTreeClassifier decision_tree = DecisionTreeClassifier() decision_tree.fit(X_train2, y_train2) tree_pred = decision_tree.predict(X_test2) print(confusion_matrix(y_test2, tree_pred)) print(classification_report(y_test2, tree_pred)) # # **Random Forest** from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier() rf.fit(X_train2, y_train2) rf_pred = rf.predict(X_test2) print(confusion_matrix(y_test2, rf_pred)) print(classification_report(y_test2, rf_pred)) # # **KNeighbors** from sklearn.neighbors import KNeighborsClassifier KN = KNeighborsClassifier() KN.fit(X_train2, 
y_train2) KN_pred = KN.predict(X_test2) KN_pred print(confusion_matrix(y_test2, KN_pred)) print(classification_report(y_test2, KN_pred)) train_columns = train.columns.values train_columns m = ["classical", "rap", "jazz", "metal", "reggae"] for j in m: for i in train_columns: if i.find(j) != -1: train[j] = 0 del train[i] train.columns.values y_train.value_counts() rf = RandomForestClassifier() rf.fit(X_train, y_train) rf_pred1 = rf.predict(X_test) rf_pred1 decision_tree = DecisionTreeClassifier() decision_tree.fit(X_train, y_train) tree_pred1 = decision_tree.predict(X_test) tree_pred1
false
0
2,954
0
2,954
2,954
69332353
<jupyter_start><jupyter_text>SBI_Historical_Data Kaggle dataset identifier: sbi-historical-data <jupyter_code>import pandas as pd df = pd.read_csv('sbi-historical-data/SBI_Historical_Data.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 1385 entries, 0 to 1384 Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Date 1385 non-null object 1 Price 1385 non-null float64 2 Open 1385 non-null float64 3 High 1385 non-null float64 4 Low 1385 non-null float64 5 Vol. 1385 non-null object 6 Change % 1385 non-null object dtypes: float64(4), object(3) memory usage: 75.9+ KB <jupyter_text>Examples: { "Date": "2020-08-07 00:00:00", "Price": 190.65, "Open": 191.45, "High": 192.1, "Low": 189.55, "Vol.": "44.82M", "Change %": "-0.16%" } { "Date": "2020-08-06 00:00:00", "Price": 190.95, "Open": 192.3, "High": 194.5, "Low": 190.25, "Vol.": "59.74M", "Change %": "-0.26%" } { "Date": "2020-08-05 00:00:00", "Price": 191.45, "Open": 192.75, "High": 196.85, "Low": 191.0, "Vol.": "68.89M", "Change %": "-0.08%" } { "Date": "2020-08-04 00:00:00", "Price": 191.6, "Open": 193.35, "High": 193.8, "Low": 190.5, "Vol.": "43.87M", "Change %": "-0.34%" } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # Question -1 # Reading data as a dataframe sbi = pd.read_csv("../input/sbi-historical-data/SBI_Historical_Data.csv") # to see the sample data. 
(first two rows) sbi.head(2) # printing dimensions of dataframe/dataset sbi.shape # to know the type of data stored in different columns sbi.info() # sbi.describe() # manual statistical summary of numeric columns - Pricing print("Mean of Price:", sbi["Price"].mean()) # finding the mean print("Median of Price:", sbi["Price"].median()) # median of Price column print("Mode of Price:", sbi["Price"].mode()) # finding mode of Price column print("Standard-deviation of Price:", sbi["Price"].std()) print("Min of Price:", min(sbi["Price"])) print("Max of Price:", max(sbi["Price"])) print("Quartiles of Price:\n", sbi["Price"].quantile([0.25, 0.5, 0.75])) # statistical summary of Price, Open, High, Low cols = ["Price", "Open", "High", "Low"] for i in cols: print("\n\n\n Summary of ", i) print("*********************************************") print("Mean of ", i, ":", sbi[i].mean()) # finding the mean print("Median of", i, ":", sbi[i].median()) # median of Price column print("Mode of", i, ":", sbi[i].mode()) # finding mode of Price column print("Standard-deviation of", i, ":", sbi[i].std()) print("Min of ", i, ":", min(sbi[i])) print("Max of ", i, ":", max(sbi[i])) print("Quartiles of", i, ":\n", sbi[i].quantile([0.25, 0.5, 0.75])) # changing the date to YYYY-MM-DD format sbi["Date"] = pd.to_datetime(sbi["Date"]) # sbi dataframe after changing the date sbi # checking the data types sbi.dtypes # checking the missing values sbi.isnull() # # Renaming the Columns # 1. "Vol." to "Vol" # 2. "Change %" to "Change" sbi = sbi.rename(columns={"Vol.": "Vol", "Change %": "Change"}) sbi # Removing the % symbol from Change column sbi["Change"] = sbi["Change"].str.replace(r"%", "") sbi # converting Change datatype from 'String' to 'float type' sbi["Change"] = sbi["Change"].astype(float) sbi.dtypes import matplotlib.pyplot as plt plt.plot(sbi.Change) plt.show() # ADF import pandas as pd import numpy as np import matplotlib.pyplot as plt import statsmodels.api as sm from statsmodels.tsa.stattools import adfuller x = sbi["Change"].values result = adfuller(x) print("ADF Statistic: %f" % result[0]) print("p-value: %f" % result[1]) print("Critical Values:") for key, value in result[4].items(): print("\t%s: %.3f" % (key, value)) if result[0] < result[4]["5%"]: print("Reject Ho - Time Series is Stationary") else: print("Failed to Reject Ho - Time Series is Non-Stationary") sbi["Diff_Change"] = sbi["Change"].diff() sbi diff_Change = sbi["Diff_Change"].values Y = diff_Change[~np.isnan(diff_Change)] Y ts_Change_orig = sbi["Change"].values ts_Change = ts_Change_orig[:-1] X = sm.add_constant(ts_Change) model = sm.OLS(Y, X) results = model.fit() print(results.summary()) results.tvalues[1]
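For the ADF block above, the same stationarity decision can also be read directly from the p-value that `adfuller` returns, which avoids comparing against a single critical value. A short sketch, assuming `sbi["Change"]` has already been cleaned to float as in the notebook:

from statsmodels.tsa.stattools import adfuller

stat, pvalue, *rest = adfuller(sbi["Change"].values)
if pvalue < 0.05:
    print("Reject H0 - the Change series looks stationary")
else:
    print("Fail to reject H0 - the Change series looks non-stationary")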
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332353.ipynb
sbi-historical-data
priyadelhi
[{"Id": 69332353, "ScriptId": 18920182, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4674000, "CreationDate": "07/29/2021 16:42:50", "VersionNumber": 1.0, "Title": "SBI_Historical_Data", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 146.0, "LinesInsertedFromPrevious": 146.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 92370145, "KernelVersionId": 69332353, "SourceDatasetVersionId": 2035687}]
[{"Id": 2035687, "DatasetId": 1218991, "DatasourceVersionId": 2075552, "CreatorUserId": 6605353, "LicenseName": "Unknown", "CreationDate": "03/18/2021 10:57:29", "VersionNumber": 1.0, "Title": "SBI_Historical_Data", "Slug": "sbi-historical-data", "Subtitle": "Forecasting Stock Price", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1218991, "CreatorUserId": 6605353, "OwnerUserId": 6605353.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2035687.0, "CurrentDatasourceVersionId": 2075552.0, "ForumId": 1237090, "Type": 2, "CreationDate": "03/18/2021 10:57:29", "LastActivityDate": "03/18/2021", "TotalViews": 3641, "TotalDownloads": 92, "TotalVotes": 2, "TotalKernels": 0}]
[{"Id": 6605353, "UserName": "priyadelhi", "DisplayName": "Priya", "RegisterDate": "01/27/2021", "PerformanceTier": 1}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # Question -1 # Reading data as a dataframe sbi = pd.read_csv("../input/sbi-historical-data/SBI_Historical_Data.csv") # to see the sample data. (first two rows) sbi.head(2) # printing dimensions of dataframe/dataset sbi.shape # to know the type of data stored in different columns sbi.info() # sbi.describe() # manual statistical summary of numeric columns - Pricing print("Mean of Price:", sbi["Price"].mean()) # finding the mean print("Median of Price:", sbi["Price"].median()) # median of Price column print("Mode of Price:", sbi["Price"].mode()) # finding mode of Price column print("Standard-deviation of Price:", sbi["Price"].std()) print("Min of Price:", min(sbi["Price"])) print("Max of Price:", max(sbi["Price"])) print("Quartiles of Price:\n", sbi["Price"].quantile([0.25, 0.5, 0.75])) # statistical summary of Price, Open, High, Low cols = ["Price", "Open", "High", "Low"] for i in cols: print("\n\n\n Summary of ", i) print("*********************************************") print("Mean of ", i, ":", sbi[i].mean()) # finding the mean print("Median of", i, ":", sbi[i].median()) # median of Price column print("Mode of", i, ":", sbi[i].mode()) # finding mode of Price column print("Standard-deviation of", i, ":", sbi[i].std()) print("Min of ", i, ":", min(sbi[i])) print("Max of ", i, ":", max(sbi[i])) print("Quartiles of", i, ":\n", sbi[i].quantile([0.25, 0.5, 0.75])) # changing the date to YYYY-MM-DD format sbi["Date"] = pd.to_datetime(sbi["Date"]) # sbi dataframe after changing the date sbi # checking the data types sbi.dtypes # checking the missing values sbi.isnull() # # Renaming the Columns # 1. "Vol." to "Vol" # 2. "Change %" to "Change" sbi = sbi.rename(columns={"Vol.": "Vol", "Change %": "Change"}) sbi # Removing the % symbol from Change column sbi["Change"] = sbi["Change"].str.replace(r"%", "") sbi # converting Change datatype from 'String' to 'float type' sbi["Change"] = sbi["Change"].astype(float) sbi.dtypes import matplotlib.pyplot as plt plt.plot(sbi.Change) plt.show() # ADF import pandas as pd import numpy as np import matplotlib.pyplot as plt import statsmodels.api as sm from statsmodels.tsa.stattools import adfuller x = sbi["Change"].values result = adfuller(x) print("ADF Statistic: %f" % result[0]) print("p-value: %f" % result[1]) print("Critical Values:") for key, value in result[4].items(): print("\t%s: %.3f" % (key, value)) if result[0] < result[4]["5%"]: print("Reject Ho - Time Series is Stationary") else: print("Failed to Reject Ho - Time Series is Non-Stationary") sbi["Diff_Change"] = sbi["Change"].diff() sbi diff_Change = sbi["Diff_Change"].values Y = diff_Change[~np.isnan(diff_Change)] Y ts_Change_orig = sbi["Change"].values ts_Change = ts_Change_orig[:-1] X = sm.add_constant(ts_Change) model = sm.OLS(Y, X) results = model.fit() print(results.summary()) results.tvalues[1]
[{"sbi-historical-data/SBI_Historical_Data.csv": {"column_names": "[\"Date\", \"Price\", \"Open\", \"High\", \"Low\", \"Vol.\", \"Change %\"]", "column_data_types": "{\"Date\": \"object\", \"Price\": \"float64\", \"Open\": \"float64\", \"High\": \"float64\", \"Low\": \"float64\", \"Vol.\": \"object\", \"Change %\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1385 entries, 0 to 1384\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Date 1385 non-null object \n 1 Price 1385 non-null float64\n 2 Open 1385 non-null float64\n 3 High 1385 non-null float64\n 4 Low 1385 non-null float64\n 5 Vol. 1385 non-null object \n 6 Change % 1385 non-null object \ndtypes: float64(4), object(3)\nmemory usage: 75.9+ KB\n", "summary": "{\"Price\": {\"count\": 1385.0, \"mean\": 266.27440433213, \"std\": 45.555277315383385, \"min\": 150.85, \"25%\": 245.65, \"50%\": 270.8, \"75%\": 296.15, \"max\": 372.4}, \"Open\": {\"count\": 1385.0, \"mean\": 266.9032129963899, \"std\": 45.59066403930653, \"min\": 151.95, \"25%\": 245.9, \"50%\": 271.15, \"75%\": 296.5, \"max\": 371.95}, \"High\": {\"count\": 1385.0, \"mean\": 270.3436823104693, \"std\": 45.82974541919274, \"min\": 153.2, \"25%\": 248.8, \"50%\": 274.6, \"75%\": 300.75, \"max\": 373.8}, \"Low\": {\"count\": 1385.0, \"mean\": 262.75790613718414, \"std\": 45.248334254843066, \"min\": 148.25, \"25%\": 242.55, \"50%\": 267.4, \"75%\": 292.2, \"max\": 366.2}}", "examples": "{\"Date\":{\"0\":\"Aug 07, 2020\",\"1\":\"Aug 06, 2020\",\"2\":\"Aug 05, 2020\",\"3\":\"Aug 04, 2020\"},\"Price\":{\"0\":190.65,\"1\":190.95,\"2\":191.45,\"3\":191.6},\"Open\":{\"0\":191.45,\"1\":192.3,\"2\":192.75,\"3\":193.35},\"High\":{\"0\":192.1,\"1\":194.5,\"2\":196.85,\"3\":193.8},\"Low\":{\"0\":189.55,\"1\":190.25,\"2\":191.0,\"3\":190.5},\"Vol.\":{\"0\":\"44.82M\",\"1\":\"59.74M\",\"2\":\"68.89M\",\"3\":\"43.87M\"},\"Change %\":{\"0\":\"-0.16%\",\"1\":\"-0.26%\",\"2\":\"-0.08%\",\"3\":\"-0.34%\"}}"}}]
true
1
<start_data_description><data_path>sbi-historical-data/SBI_Historical_Data.csv: <column_names> ['Date', 'Price', 'Open', 'High', 'Low', 'Vol.', 'Change %'] <column_types> {'Date': 'object', 'Price': 'float64', 'Open': 'float64', 'High': 'float64', 'Low': 'float64', 'Vol.': 'object', 'Change %': 'object'} <dataframe_Summary> {'Price': {'count': 1385.0, 'mean': 266.27440433213, 'std': 45.555277315383385, 'min': 150.85, '25%': 245.65, '50%': 270.8, '75%': 296.15, 'max': 372.4}, 'Open': {'count': 1385.0, 'mean': 266.9032129963899, 'std': 45.59066403930653, 'min': 151.95, '25%': 245.9, '50%': 271.15, '75%': 296.5, 'max': 371.95}, 'High': {'count': 1385.0, 'mean': 270.3436823104693, 'std': 45.82974541919274, 'min': 153.2, '25%': 248.8, '50%': 274.6, '75%': 300.75, 'max': 373.8}, 'Low': {'count': 1385.0, 'mean': 262.75790613718414, 'std': 45.248334254843066, 'min': 148.25, '25%': 242.55, '50%': 267.4, '75%': 292.2, 'max': 366.2}} <dataframe_info> RangeIndex: 1385 entries, 0 to 1384 Data columns (total 7 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Date 1385 non-null object 1 Price 1385 non-null float64 2 Open 1385 non-null float64 3 High 1385 non-null float64 4 Low 1385 non-null float64 5 Vol. 1385 non-null object 6 Change % 1385 non-null object dtypes: float64(4), object(3) memory usage: 75.9+ KB <some_examples> {'Date': {'0': 'Aug 07, 2020', '1': 'Aug 06, 2020', '2': 'Aug 05, 2020', '3': 'Aug 04, 2020'}, 'Price': {'0': 190.65, '1': 190.95, '2': 191.45, '3': 191.6}, 'Open': {'0': 191.45, '1': 192.3, '2': 192.75, '3': 193.35}, 'High': {'0': 192.1, '1': 194.5, '2': 196.85, '3': 193.8}, 'Low': {'0': 189.55, '1': 190.25, '2': 191.0, '3': 190.5}, 'Vol.': {'0': '44.82M', '1': '59.74M', '2': '68.89M', '3': '43.87M'}, 'Change %': {'0': '-0.16%', '1': '-0.26%', '2': '-0.08%', '3': '-0.34%'}} <end_description>
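The description above shows that the raw `Vol.` column is stored as strings such as "44.82M" rather than numbers, and the notebook never converts it. As a hedged side note (not part of the original analysis), a helper like the one below could turn those strings into floats; it assumes the `sbi` DataFrame after the rename above, where the column is called `Vol`.

# Sketch only: convert volume strings such as "44.82M" into plain floats.
# Assumes `sbi` with the renamed "Vol" column from the notebook above.
import pandas as pd

def parse_volume(v) -> float:
    """Turn strings like '44.82M' or '1.2K' into floats."""
    multipliers = {"K": 1e3, "M": 1e6, "B": 1e9}
    v = str(v).strip()
    if v and v[-1] in multipliers:
        return float(v[:-1]) * multipliers[v[-1]]
    return float(v)  # values without a suffix fall through to plain conversion

sbi["Vol_numeric"] = sbi["Vol"].map(parse_volume)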
1,178
1
1,841
1,178
69332985
<jupyter_start><jupyter_text>Fake News ### Context Fake news has become one of the biggest problems of our age. It has serious impact on our online as well as offline discourse. One can even go as far as saying that, to date, fake news poses a clear and present danger to western democracy and stability of the society. ### Content What's inside is more than just rows and columns. Make it easy for others to get started by describing how you acquired the data and what time period it represents, too. Kaggle dataset identifier: textdb3 <jupyter_code>import pandas as pd df = pd.read_csv('textdb3/fake_or_real_news.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 6335 entries, 0 to 6334 Data columns (total 4 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 6335 non-null int64 1 title 6335 non-null object 2 text 6335 non-null object 3 label 6335 non-null object dtypes: int64(1), object(3) memory usage: 198.1+ KB <jupyter_text>Examples: { "Unnamed: 0": 8476, "title": "You Can Smell Hillary\u2019s Fear", "text": "Daniel Greenfield, a Shillman Journalism Fellow at the Freedom Center, is a New York writer focusing on radical Islam. \nIn the final stretch of the election, Hillary Rodham Clinton has gone to war with the FBI. \nThe word \u201cunprecedented\u201d has been thrown around so often this e...(truncated)", "label": "FAKE" } { "Unnamed: 0": 10294, "title": "Watch The Exact Moment Paul Ryan Committed Political Suicide At A Trump Rally (VIDEO)", "text": "Google Pinterest Digg Linkedin Reddit Stumbleupon Print Delicious Pocket Tumblr \nThere are two fundamental truths in this world: Paul Ryan desperately wants to be president. And Paul Ryan will never be president. Today proved it. \nIn a particularly staggering example of political cowa...(truncated)", "label": "FAKE" } { "Unnamed: 0": 3608, "title": "Kerry to go to Paris in gesture of sympathy", "text": "U.S. Secretary of State John F. Kerry said Monday that he will stop in Paris later this week, amid criticism that no top American officials attended Sunday\u2019s unity march against terrorism.\n\nKerry said he expects to arrive in Paris Thursday evening, as he heads home after a week a...(truncated)", "label": "REAL" } { "Unnamed: 0": 10142, "title": "Bernie supporters on Twitter erupt in anger against the DNC: 'We tried to warn you!'", "text": "\u2014 Kaydee King (@KaydeeKing) November 9, 2016 The lesson from tonight's Dem losses: Time for Democrats to start listening to the voters. Stop running the same establishment candidates. \n\u2014 People For Bernie (@People4Bernie) November 9, 2016 If Dems didn't want a tight race they...(truncated)", "label": "FAKE" } <jupyter_script># # Fake News With Python # This advanced python project of detecting fake news deals with fake and real news. Using sklearn, we build a TfidfVectorizer on our dataset. Then, we initialize a PassiveAggressive Classifier and fit the model. In the end, the accuracy score and the confusion matrix tell us how well our model fares. 
import numpy as np
import pandas as pd
import itertools
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# Read Data
df = pd.read_csv("../input/textdb3/fake_or_real_news.csv")
df.head()

# get the labels from the dataframe
labels = df.label
labels.head()

# split the dataset
x_train, x_test, y_train, y_test = train_test_split(
    df["text"], labels, test_size=0.2, random_state=7
)

# Initialize TfidfVectorizer with English stop words and a max document frequency of 0.7
tfidf_vect = TfidfVectorizer(stop_words="english", max_df=0.7)

# fit and transform the train set, transform the test set
tfidf_train = tfidf_vect.fit_transform(x_train)
tfidf_test = tfidf_vect.transform(x_test)

# #### Fit on tfidf_train and y_train, then predict on the test set

# initialize a PassiveAggressiveClassifier
pac = PassiveAggressiveClassifier(max_iter=50)
pac.fit(tfidf_train, y_train)

# predict on the test set and calculate accuracy
y_pred = pac.predict(tfidf_test)
score = accuracy_score(y_test, y_pred)
print(f"Accuracy: {round(score*100, 2)}%")

# confusion matrix
confusion_matrix(y_test, y_pred, labels=["FAKE", "REAL"])
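The notebook stops at the raw 2×2 confusion matrix. A small optional extension (not part of the original script, and assuming the `y_test` and `y_pred` variables from the cells above) is to label the matrix and print per-class precision, recall and F1:

# Sketch only: label the confusion matrix and report per-class metrics.
# Assumes y_test and y_pred from the cells above.
import pandas as pd
from sklearn.metrics import confusion_matrix, classification_report

class_names = ["FAKE", "REAL"]
cm = confusion_matrix(y_test, y_pred, labels=class_names)
cm_df = pd.DataFrame(
    cm,
    index=[f"true_{c}" for c in class_names],
    columns=[f"pred_{c}" for c in class_names],
)
print(cm_df)
print(classification_report(y_test, y_pred, labels=class_names))

Passing the same `labels` list to both calls keeps the matrix rows and the report rows in the same class order.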
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332985.ipynb
textdb3
hassanamin
[{"Id": 69332985, "ScriptId": 18927475, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3340191, "CreationDate": "07/29/2021 16:52:38", "VersionNumber": 1.0, "Title": "Fake News Detector", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 63.0, "LinesInsertedFromPrevious": 63.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 92371580, "KernelVersionId": 69332985, "SourceDatasetVersionId": 310019}]
[{"Id": 310019, "DatasetId": 129603, "DatasourceVersionId": 323018, "CreatorUserId": 1962508, "LicenseName": "GNU Lesser General Public License 3.0", "CreationDate": "03/01/2019 10:09:42", "VersionNumber": 1.0, "Title": "Fake News", "Slug": "textdb3", "Subtitle": "Balanced dataset for fake news analysis", "Description": "### Context\n\nFake news has become one of the biggest problems of our age. It has serious impact on our online as well as offline discourse. One can even go as far as saying that, to date, fake news poses a clear and present danger to western democracy and stability of the society.\n\n### Content\n\nWhat's inside is more than just rows and columns. Make it easy for others to get started by describing how you acquired the data and what time period it represents, too.\n\n\n### Acknowledgements\n\nWe wouldn't be here without the help of others. If you owe any attributions or thanks, include them here along with any citations of past research.\n\n\n### Inspiration\n\nYour data will be in front of the world's largest data science community. What questions do you want to see answered?", "VersionNotes": "Initial release", "TotalCompressedBytes": 30696129.0, "TotalUncompressedBytes": 11877334.0}]
[{"Id": 129603, "CreatorUserId": 1962508, "OwnerUserId": 1962508.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 310019.0, "CurrentDatasourceVersionId": 323018.0, "ForumId": 139714, "Type": 2, "CreationDate": "03/01/2019 10:09:42", "LastActivityDate": "03/01/2019", "TotalViews": 37686, "TotalDownloads": 7757, "TotalVotes": 85, "TotalKernels": 61}]
[{"Id": 1962508, "UserName": "hassanamin", "DisplayName": "Hassan Amin", "RegisterDate": "06/03/2018", "PerformanceTier": 2}]
[{"textdb3/fake_or_real_news.csv": {"column_names": "[\"Unnamed: 0\", \"title\", \"text\", \"label\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"title\": \"object\", \"text\": \"object\", \"label\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 6335 entries, 0 to 6334\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 6335 non-null int64 \n 1 title 6335 non-null object\n 2 text 6335 non-null object\n 3 label 6335 non-null object\ndtypes: int64(1), object(3)\nmemory usage: 198.1+ KB\n", "summary": "{\"Unnamed: 0\": {\"count\": 6335.0, \"mean\": 5280.4156274664565, \"std\": 3038.503952617909, \"min\": 2.0, \"25%\": 2674.5, \"50%\": 5271.0, \"75%\": 7901.0, \"max\": 10557.0}}", "examples": "{\"Unnamed: 0\":{\"0\":8476,\"1\":10294,\"2\":3608,\"3\":10142},\"title\":{\"0\":\"You Can Smell Hillary\\u2019s Fear\",\"1\":\"Watch The Exact Moment Paul Ryan Committed Political Suicide At A Trump Rally (VIDEO)\",\"2\":\"Kerry to go to Paris in gesture of sympathy\",\"3\":\"Bernie supporters on Twitter erupt in anger against the DNC: 'We tried to warn you!'\"},\"text\":{\"0\":\"Daniel Greenfield, a Shillman Journalism Fellow at the Freedom Center, is a New York writer focusing on radical Islam. \\nIn the final stretch of the election, Hillary Rodham Clinton has gone to war with the FBI. \\nThe word \\u201cunprecedented\\u201d has been thrown around so often this election that it ought to be retired. But it\\u2019s still unprecedented for the nominee of a major political party to go war with the FBI. \\nBut that\\u2019s exactly what Hillary and her people have done. Coma patients just waking up now and watching an hour of CNN from their hospital beds would assume that FBI Director James Comey is Hillary\\u2019s opponent in this election. \\nThe FBI is under attack by everyone from Obama to CNN. Hillary\\u2019s people have circulated a letter attacking Comey. There are currently more media hit pieces lambasting him than targeting Trump. It wouldn\\u2019t be too surprising if the Clintons or their allies were to start running attack ads against the FBI. \\nThe FBI\\u2019s leadership is being warned that the entire left-wing establishment will form a lynch mob if they continue going after Hillary. And the FBI\\u2019s credibility is being attacked by the media and the Democrats to preemptively head off the results of the investigation of the Clinton Foundation and Hillary Clinton. \\nThe covert struggle between FBI agents and Obama\\u2019s DOJ people has gone explosively public. \\nThe New York Times has compared Comey to J. Edgar Hoover. Its bizarre headline, \\u201cJames Comey Role Recalls Hoover\\u2019s FBI, Fairly or Not\\u201d practically admits up front that it\\u2019s spouting nonsense. The Boston Globe has published a column calling for Comey\\u2019s resignation. Not to be outdone, Time has an editorial claiming that the scandal is really an attack on all women. \\nJames Carville appeared on MSNBC to remind everyone that he was still alive and insane. He accused Comey of coordinating with House Republicans and the KGB. And you thought the \\u201cvast right wing conspiracy\\u201d was a stretch. \\nCountless media stories charge Comey with violating procedure. Do you know what\\u2019s a procedural violation? Emailing classified information stored on your bathroom server. \\nSenator Harry Reid has sent Comey a letter accusing him of violating the Hatch Act. 
The Hatch Act is a nice idea that has as much relevance in the age of Obama as the Tenth Amendment. But the cable news spectrum quickly filled with media hacks glancing at the Wikipedia article on the Hatch Act under the table while accusing the FBI director of one of the most awkward conspiracies against Hillary ever. \\nIf James Comey is really out to hurt Hillary, he picked one hell of a strange way to do it. \\nNot too long ago Democrats were breathing a sigh of relief when he gave Hillary Clinton a pass in a prominent public statement. If he really were out to elect Trump by keeping the email scandal going, why did he trash the investigation? Was he on the payroll of House Republicans and the KGB back then and playing it coy or was it a sudden development where Vladimir Putin and Paul Ryan talked him into taking a look at Anthony Weiner\\u2019s computer? \\nEither Comey is the most cunning FBI director that ever lived or he\\u2019s just awkwardly trying to navigate a political mess that has trapped him between a DOJ leadership whose political futures are tied to Hillary\\u2019s victory and his own bureau whose apolitical agents just want to be allowed to do their jobs. \\nThe only truly mysterious thing is why Hillary and her associates decided to go to war with a respected Federal agency. Most Americans like the FBI while Hillary Clinton enjoys a 60% unfavorable rating. \\nAnd it\\u2019s an interesting question. \\nHillary\\u2019s old strategy was to lie and deny that the FBI even had a criminal investigation underway. Instead her associates insisted that it was a security review. The FBI corrected her and she shrugged it off. But the old breezy denial approach has given way to a savage assault on the FBI. \\nPretending that nothing was wrong was a bad strategy, but it was a better one that picking a fight with the FBI while lunatic Clinton associates try to claim that the FBI is really the KGB. \\nThere are two possible explanations. \\nHillary Clinton might be arrogant enough to lash out at the FBI now that she believes that victory is near. The same kind of hubris that led her to plan her victory fireworks display could lead her to declare a war on the FBI for irritating her during the final miles of her campaign. \\nBut the other explanation is that her people panicked. \\nGoing to war with the FBI is not the behavior of a smart and focused presidential campaign. It\\u2019s an act of desperation. When a presidential candidate decides that her only option is to try and destroy the credibility of the FBI, that\\u2019s not hubris, it\\u2019s fear of what the FBI might be about to reveal about her. \\nDuring the original FBI investigation, Hillary Clinton was confident that she could ride it out. And she had good reason for believing that. But that Hillary Clinton is gone. In her place is a paranoid wreck. Within a short space of time the \\u201cpositive\\u201d Clinton campaign promising to unite the country has been replaced by a desperate and flailing operation that has focused all its energy on fighting the FBI. \\nThere\\u2019s only one reason for such bizarre behavior. \\nThe Clinton campaign has decided that an FBI investigation of the latest batch of emails poses a threat to its survival. And so it\\u2019s gone all in on fighting the FBI. It\\u2019s an unprecedented step born of fear. It\\u2019s hard to know whether that fear is justified. But the existence of that fear already tells us a whole lot. \\nClinton loyalists rigged the old investigation. 
They knew the outcome ahead of time as well as they knew the debate questions. Now suddenly they are no longer in control. And they are afraid. \\nYou can smell the fear. \\nThe FBI has wiretaps from the investigation of the Clinton Foundation. It\\u2019s finding new emails all the time. And Clintonworld panicked. The spinmeisters of Clintonworld have claimed that the email scandal is just so much smoke without fire. All that\\u2019s here is the appearance of impropriety without any of the substance. But this isn\\u2019t how you react to smoke. It\\u2019s how you respond to a fire. \\nThe misguided assault on the FBI tells us that Hillary Clinton and her allies are afraid of a revelation bigger than the fundamental illegality of her email setup. The email setup was a preemptive cover up. The Clinton campaign has panicked badly out of the belief, right or wrong, that whatever crime the illegal setup was meant to cover up is at risk of being exposed. \\nThe Clintons have weathered countless scandals over the years. Whatever they are protecting this time around is bigger than the usual corruption, bribery, sexual assaults and abuses of power that have followed them around throughout the years. This is bigger and more damaging than any of the allegations that have already come out. And they don\\u2019t want FBI investigators anywhere near it. \\nThe campaign against Comey is pure intimidation. It\\u2019s also a warning. Any senior FBI people who value their careers are being warned to stay away. The Democrats are closing ranks around their nominee against the FBI. It\\u2019s an ugly and unprecedented scene. It may also be their last stand. \\nHillary Clinton has awkwardly wound her way through numerous scandals in just this election cycle. But she\\u2019s never shown fear or desperation before. Now that has changed. Whatever she is afraid of, it lies buried in her emails with Huma Abedin. And it can bring her down like nothing else has. \",\"1\":\"Google Pinterest Digg Linkedin Reddit Stumbleupon Print Delicious Pocket Tumblr \\nThere are two fundamental truths in this world: Paul Ryan desperately wants to be president. And Paul Ryan will never be president. Today proved it. \\nIn a particularly staggering example of political cowardice, Paul Ryan re-re-re-reversed course and announced that he was back on the Trump Train after all. This was an aboutface from where he was a few weeks ago. He had previously declared he would not be supporting or defending Trump after a tape was made public in which Trump bragged about assaulting women. Suddenly, Ryan was appearing at a pro-Trump rally and boldly declaring that he already sent in his vote to make him President of the United States. It was a surreal moment. The figurehead of the Republican Party dosed himself in gasoline, got up on a stage on a chilly afternoon in Wisconsin, and lit a match. . @SpeakerRyan says he voted for @realDonaldTrump : \\u201cRepublicans, it is time to come home\\u201d https:\\/\\/t.co\\/VyTT49YvoE pic.twitter.com\\/wCvSCg4a5I \\n\\u2014 ABC News Politics (@ABCPolitics) November 5, 2016 \\nThe Democratic Party couldn\\u2019t have asked for a better moment of film. Ryan\\u2019s chances of ever becoming president went down to zero in an instant. In the wreckage Trump is to leave behind in his wake, those who cravenly backed his campaign will not recover. If Ryan\\u2019s career manages to limp all the way to 2020, then the DNC will have this tape locked and loaded to be used in every ad until Election Day. 
\\nThe ringing endorsement of the man he clearly hates on a personal level speaks volumes about his own spinelessness. Ryan has postured himself as a \\u201cprincipled\\u201d conservative, and one uncomfortable with Trump\\u2019s unapologetic bigotry and sexism. However, when push came to shove, Paul Ryan \\u2013 like many of his colleagues \\u2013 turned into a sniveling appeaser. After all his lofty tak about conviction, his principles were a house of cards and collapsed with the slightest breeze. \\nWhat\\u2019s especially bizarre is how close Ryan came to making it through unscathed. For months the Speaker of the House refused to comment on Trump at all. His strategy seemed to be to keep his head down, pretend Trump didn\\u2019t exist, and hope that nobody remembered what happened in 2016. Now, just days away from the election, he screwed it all up. \\nIf 2016\\u2019s very ugly election has done any good it\\u2019s by exposing the utter cowardice of the Republicans who once feigned moral courage. A reality television star spit on them, hijacked their party, insulted their wives, and got every last one of them to kneel before him. What a turn of events. \\nFeatured image via Twitter\",\"2\":\"U.S. Secretary of State John F. Kerry said Monday that he will stop in Paris later this week, amid criticism that no top American officials attended Sunday\\u2019s unity march against terrorism.\\n\\nKerry said he expects to arrive in Paris Thursday evening, as he heads home after a week abroad. He said he will fly to France at the conclusion of a series of meetings scheduled for Thursday in Sofia, Bulgaria. He plans to meet the next day with Foreign Minister Laurent Fabius and President Francois Hollande, then return to Washington.\\n\\nThe visit by Kerry, who has family and childhood ties to the country and speaks fluent French, could address some of the criticism that the United States snubbed France in its darkest hour in many years.\\n\\nThe French press on Monday was filled with questions about why neither President Obama nor Kerry attended Sunday\\u2019s march, as about 40 leaders of other nations did. Obama was said to have stayed away because his own security needs can be taxing on a country, and Kerry had prior commitments.\\n\\nAmong roughly 40 leaders who did attend was Israeli Prime Minister Benjamin Netanyahu, no stranger to intense security, who marched beside Hollande through the city streets. The highest ranking U.S. officials attending the march were Jane Hartley, the ambassador to France, and Victoria Nuland, the assistant secretary of state for European affairs. Attorney General Eric H. Holder Jr. was in Paris for meetings with law enforcement officials but did not participate in the march.\\n\\nKerry spent Sunday at a business summit hosted by India\\u2019s prime minister, Narendra Modi. The United States is eager for India to relax stringent laws that function as barriers to foreign investment and hopes Modi\\u2019s government will act to open the huge Indian market for more American businesses.\\n\\nIn a news conference, Kerry brushed aside criticism that the United States had not sent a more senior official to Paris as \\u201cquibbling a little bit.\\u201d He noted that many staffers of the American Embassy in Paris attended the march, including the ambassador. 
He said he had wanted to be present at the march himself but could not because of his prior commitments in India.\\n\\n\\u201cBut that is why I am going there on the way home, to make it crystal clear how passionately we feel about the events that have taken place there,\\u201d he said.\\n\\n\\u201cAnd I don\\u2019t think the people of France have any doubts about America\\u2019s understanding of what happened, of our personal sense of loss and our deep commitment to the people of France in this moment of trauma.\\u201d\",\"3\":\"\\u2014 Kaydee King (@KaydeeKing) November 9, 2016 The lesson from tonight's Dem losses: Time for Democrats to start listening to the voters. Stop running the same establishment candidates. \\n\\u2014 People For Bernie (@People4Bernie) November 9, 2016 If Dems didn't want a tight race they shouldn't have worked against Bernie. \\n\\u2014 Walker Bragman (@WalkerBragman) November 9, 2016 \\nNew York Times columnist Paul Krugman, who was one of Hillary Clinton\\u2019s most outspoken surrogates during the contentious Democratic primary, blamed Clinton\\u2019s poor performance on Green Party candidate Jill Stein, who has so far received a negligible number of votes nationally, saying Stein was the Ralph Nader of 2016 in preventing a Clinton victory. The account @BerniesTeachers threw Krugman\\u2019s analysis back in his face. Your candidate was the issue. Take responsibility. https:\\/\\/t.co\\/KHyOuUSrFS \\n\\u2014 Teachers for Bernie (@BerniesTeachers) November 9, 2016 \\nAna Navarro, a Republican who recently endorsed Hillary Clinton, summed up the preposterous nature of the 2016 presidential election in this tweet: GOP nominated the only damn candidate who could lose to Hillary Clinton. Democrats nominated the only damn candidate who could lose to Trump \\n\\u2014 Ana Navarro (@ananavarro) November 9, 2016 \\nPopular left-wing Facebook page The Other 98%, which was pro-Sanders during the primary, responded to Trump\\u2019s surge by simply posting a meme of Sanders\\u2019 face with the text \\u201cAll this could\\u2019ve been avoided. Thanks for nothing, DNC!\\u201d The meme has been shared almost 15,000 times in less than an hour: \\nPosted by The Other 98% on Tuesday, November 8, 2016 \\nWhile Bernie Sanders endorsed Hillary Clinton just before the Democratic National Convention in July, many of his supporters remained adamant in their refusal to support the DNC-anointed candidate, pointing to WikiLeaks\\u2019 revelations that top officials at the DNC had been working behind the scenes to tip the scales in Clinton\\u2019s favor by coordinating with media figures to circulate anti-Sanders narratives. \\nRather than attribute a potential Trump presidency to the GOP nominee\\u2019s perceived popularity among voters, the closeness of this election could be credited to Hillary Clinton\\u2019s unfavorable ratings. According to RealClearPolitics, anywhere between 51 and 57 percent of voters had a negative opinion of the Democratic nominee. \\nAs of 11 PM Eastern, Florida, Michigan, Pennsylvania, and Wisconsin remain too close to call. Clinton has 197 electoral votes to Trump\\u2019s 187. \\n\\nZach Cartwright is an activist and author from Richmond, Virginia. He enjoys writing about politics, government, and the media. Send him an email at [email protected]\"},\"label\":{\"0\":\"FAKE\",\"1\":\"FAKE\",\"2\":\"REAL\",\"3\":\"FAKE\"}}"}}]
true
1
<start_data_description><data_path>textdb3/fake_or_real_news.csv: <column_names> ['Unnamed: 0', 'title', 'text', 'label'] <column_types> {'Unnamed: 0': 'int64', 'title': 'object', 'text': 'object', 'label': 'object'} <dataframe_Summary> {'Unnamed: 0': {'count': 6335.0, 'mean': 5280.4156274664565, 'std': 3038.503952617909, 'min': 2.0, '25%': 2674.5, '50%': 5271.0, '75%': 7901.0, 'max': 10557.0}} <dataframe_info> RangeIndex: 6335 entries, 0 to 6334 Data columns (total 4 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 6335 non-null int64 1 title 6335 non-null object 2 text 6335 non-null object 3 label 6335 non-null object dtypes: int64(1), object(3) memory usage: 198.1+ KB <some_examples> {'Unnamed: 0': {'0': 8476, '1': 10294, '2': 3608, '3': 10142}, 'title': {'0': 'You Can Smell Hillary’s Fear', '1': 'Watch The Exact Moment Paul Ryan Committed Political Suicide At A Trump Rally (VIDEO)', '2': 'Kerry to go to Paris in gesture of sympathy', '3': "Bernie supporters on Twitter erupt in anger against the DNC: 'We tried to warn you!'"}, 'text': {'0': 'Daniel Greenfield, a Shillman Journalism Fellow at the Freedom Center, is a New York writer focusing on radical Islam. \nIn the final stretch of the election, Hillary Rodham Clinton has gone to war with the FBI. \nThe word “unprecedented” has been thrown around so often this election that it ought to be retired. But it’s still unprecedented for the nominee of a major political party to go war with the FBI. \nBut that’s exactly what Hillary and her people have done. Coma patients just waking up now and watching an hour of CNN from their hospital beds would assume that FBI Director James Comey is Hillary’s opponent in this election. \nThe FBI is under attack by everyone from Obama to CNN. Hillary’s people have circulated a letter attacking Comey. There are currently more media hit pieces lambasting him than targeting Trump. It wouldn’t be too surprising if the Clintons or their allies were to start running attack ads against the FBI. \nThe FBI’s leadership is being warned that the entire left-wing establishment will form a lynch mob if they continue going after Hillary. And the FBI’s credibility is being attacked by the media and the Democrats to preemptively head off the results of the investigation of the Clinton Foundation and Hillary Clinton. \nThe covert struggle between FBI agents and Obama’s DOJ people has gone explosively public. \nThe New York Times has compared Comey to J. Edgar Hoover. Its bizarre headline, “James Comey Role Recalls Hoover’s FBI, Fairly or Not” practically admits up front that it’s spouting nonsense. The Boston Globe has published a column calling for Comey’s resignation. Not to be outdone, Time has an editorial claiming that the scandal is really an attack on all women. \nJames Carville appeared on MSNBC to remind everyone that he was still alive and insane. He accused Comey of coordinating with House Republicans and the KGB. And you thought the “vast right wing conspiracy” was a stretch. \nCountless media stories charge Comey with violating procedure. Do you know what’s a procedural violation? Emailing classified information stored on your bathroom server. \nSenator Harry Reid has sent Comey a letter accusing him of violating the Hatch Act. The Hatch Act is a nice idea that has as much relevance in the age of Obama as the Tenth Amendment. 
But the cable news spectrum quickly filled with media hacks glancing at the Wikipedia article on the Hatch Act under the table while accusing the FBI director of one of the most awkward conspiracies against Hillary ever. \nIf James Comey is really out to hurt Hillary, he picked one hell of a strange way to do it. \nNot too long ago Democrats were breathing a sigh of relief when he gave Hillary Clinton a pass in a prominent public statement. If he really were out to elect Trump by keeping the email scandal going, why did he trash the investigation? Was he on the payroll of House Republicans and the KGB back then and playing it coy or was it a sudden development where Vladimir Putin and Paul Ryan talked him into taking a look at Anthony Weiner’s computer? \nEither Comey is the most cunning FBI director that ever lived or he’s just awkwardly trying to navigate a political mess that has trapped him between a DOJ leadership whose political futures are tied to Hillary’s victory and his own bureau whose apolitical agents just want to be allowed to do their jobs. \nThe only truly mysterious thing is why Hillary and her associates decided to go to war with a respected Federal agency. Most Americans like the FBI while Hillary Clinton enjoys a 60% unfavorable rating. \nAnd it’s an interesting question. \nHillary’s old strategy was to lie and deny that the FBI even had a criminal investigation underway. Instead her associates insisted that it was a security review. The FBI corrected her and she shrugged it off. But the old breezy denial approach has given way to a savage assault on the FBI. \nPretending that nothing was wrong was a bad strategy, but it was a better one that picking a fight with the FBI while lunatic Clinton associates try to claim that the FBI is really the KGB. \nThere are two possible explanations. \nHillary Clinton might be arrogant enough to lash out at the FBI now that she believes that victory is near. The same kind of hubris that led her to plan her victory fireworks display could lead her to declare a war on the FBI for irritating her during the final miles of her campaign. \nBut the other explanation is that her people panicked. \nGoing to war with the FBI is not the behavior of a smart and focused presidential campaign. It’s an act of desperation. When a presidential candidate decides that her only option is to try and destroy the credibility of the FBI, that’s not hubris, it’s fear of what the FBI might be about to reveal about her. \nDuring the original FBI investigation, Hillary Clinton was confident that she could ride it out. And she had good reason for believing that. But that Hillary Clinton is gone. In her place is a paranoid wreck. Within a short space of time the “positive” Clinton campaign promising to unite the country has been replaced by a desperate and flailing operation that has focused all its energy on fighting the FBI. \nThere’s only one reason for such bizarre behavior. \nThe Clinton campaign has decided that an FBI investigation of the latest batch of emails poses a threat to its survival. And so it’s gone all in on fighting the FBI. It’s an unprecedented step born of fear. It’s hard to know whether that fear is justified. But the existence of that fear already tells us a whole lot. \nClinton loyalists rigged the old investigation. They knew the outcome ahead of time as well as they knew the debate questions. Now suddenly they are no longer in control. And they are afraid. \nYou can smell the fear. 
\nThe FBI has wiretaps from the investigation of the Clinton Foundation. It’s finding new emails all the time. And Clintonworld panicked. The spinmeisters of Clintonworld have claimed that the email scandal is just so much smoke without fire. All that’s here is the appearance of impropriety without any of the substance. But this isn’t how you react to smoke. It’s how you respond to a fire. \nThe misguided assault on the FBI tells us that Hillary Clinton and her allies are afraid of a revelation bigger than the fundamental illegality of her email setup. The email setup was a preemptive cover up. The Clinton campaign has panicked badly out of the belief, right or wrong, that whatever crime the illegal setup was meant to cover up is at risk of being exposed. \nThe Clintons have weathered countless scandals over the years. Whatever they are protecting this time around is bigger than the usual corruption, bribery, sexual assaults and abuses of power that have followed them around throughout the years. This is bigger and more damaging than any of the allegations that have already come out. And they don’t want FBI investigators anywhere near it. \nThe campaign against Comey is pure intimidation. It’s also a warning. Any senior FBI people who value their careers are being warned to stay away. The Democrats are closing ranks around their nominee against the FBI. It’s an ugly and unprecedented scene. It may also be their last stand. \nHillary Clinton has awkwardly wound her way through numerous scandals in just this election cycle. But she’s never shown fear or desperation before. Now that has changed. Whatever she is afraid of, it lies buried in her emails with Huma Abedin. And it can bring her down like nothing else has. ', '1': 'Google Pinterest Digg Linkedin Reddit Stumbleupon Print Delicious Pocket Tumblr \nThere are two fundamental truths in this world: Paul Ryan desperately wants to be president. And Paul Ryan will never be president. Today proved it. \nIn a particularly staggering example of political cowardice, Paul Ryan re-re-re-reversed course and announced that he was back on the Trump Train after all. This was an aboutface from where he was a few weeks ago. He had previously declared he would not be supporting or defending Trump after a tape was made public in which Trump bragged about assaulting women. Suddenly, Ryan was appearing at a pro-Trump rally and boldly declaring that he already sent in his vote to make him President of the United States. It was a surreal moment. The figurehead of the Republican Party dosed himself in gasoline, got up on a stage on a chilly afternoon in Wisconsin, and lit a match. . @SpeakerRyan says he voted for @realDonaldTrump : “Republicans, it is time to come home” https://t.co/VyTT49YvoE pic.twitter.com/wCvSCg4a5I \n— ABC News Politics (@ABCPolitics) November 5, 2016 \nThe Democratic Party couldn’t have asked for a better moment of film. Ryan’s chances of ever becoming president went down to zero in an instant. In the wreckage Trump is to leave behind in his wake, those who cravenly backed his campaign will not recover. If Ryan’s career manages to limp all the way to 2020, then the DNC will have this tape locked and loaded to be used in every ad until Election Day. \nThe ringing endorsement of the man he clearly hates on a personal level speaks volumes about his own spinelessness. Ryan has postured himself as a “principled” conservative, and one uncomfortable with Trump’s unapologetic bigotry and sexism. 
However, when push came to shove, Paul Ryan – like many of his colleagues – turned into a sniveling appeaser. After all his lofty tak about conviction, his principles were a house of cards and collapsed with the slightest breeze. \nWhat’s especially bizarre is how close Ryan came to making it through unscathed. For months the Speaker of the House refused to comment on Trump at all. His strategy seemed to be to keep his head down, pretend Trump didn’t exist, and hope that nobody remembered what happened in 2016. Now, just days away from the election, he screwed it all up. \nIf 2016’s very ugly election has done any good it’s by exposing the utter cowardice of the Republicans who once feigned moral courage. A reality television star spit on them, hijacked their party, insulted their wives, and got every last one of them to kneel before him. What a turn of events. \nFeatured image via Twitter', '2': 'U.S. Secretary of State John F. Kerry said Monday that he will stop in Paris later this week, amid criticism that no top American officials attended Sunday’s unity march against terrorism.\n\nKerry said he expects to arrive in Paris Thursday evening, as he heads home after a week abroad. He said he will fly to France at the conclusion of a series of meetings scheduled for Thursday in Sofia, Bulgaria. He plans to meet the next day with Foreign Minister Laurent Fabius and President Francois Hollande, then return to Washington.\n\nThe visit by Kerry, who has family and childhood ties to the country and speaks fluent French, could address some of the criticism that the United States snubbed France in its darkest hour in many years.\n\nThe French press on Monday was filled with questions about why neither President Obama nor Kerry attended Sunday’s march, as about 40 leaders of other nations did. Obama was said to have stayed away because his own security needs can be taxing on a country, and Kerry had prior commitments.\n\nAmong roughly 40 leaders who did attend was Israeli Prime Minister Benjamin Netanyahu, no stranger to intense security, who marched beside Hollande through the city streets. The highest ranking U.S. officials attending the march were Jane Hartley, the ambassador to France, and Victoria Nuland, the assistant secretary of state for European affairs. Attorney General Eric H. Holder Jr. was in Paris for meetings with law enforcement officials but did not participate in the march.\n\nKerry spent Sunday at a business summit hosted by India’s prime minister, Narendra Modi. The United States is eager for India to relax stringent laws that function as barriers to foreign investment and hopes Modi’s government will act to open the huge Indian market for more American businesses.\n\nIn a news conference, Kerry brushed aside criticism that the United States had not sent a more senior official to Paris as “quibbling a little bit.” He noted that many staffers of the American Embassy in Paris attended the march, including the ambassador. 
He said he had wanted to be present at the march himself but could not because of his prior commitments in India.\n\n“But that is why I am going there on the way home, to make it crystal clear how passionately we feel about the events that have taken place there,” he said.\n\n“And I don’t think the people of France have any doubts about America’s understanding of what happened, of our personal sense of loss and our deep commitment to the people of France in this moment of trauma.”', '3': "— Kaydee King (@KaydeeKing) November 9, 2016 The lesson from tonight's Dem losses: Time for Democrats to start listening to the voters. Stop running the same establishment candidates. \n— People For Bernie (@People4Bernie) November 9, 2016 If Dems didn't want a tight race they shouldn't have worked against Bernie. \n— Walker Bragman (@WalkerBragman) November 9, 2016 \nNew York Times columnist Paul Krugman, who was one of Hillary Clinton’s most outspoken surrogates during the contentious Democratic primary, blamed Clinton’s poor performance on Green Party candidate Jill Stein, who has so far received a negligible number of votes nationally, saying Stein was the Ralph Nader of 2016 in preventing a Clinton victory. The account @BerniesTeachers threw Krugman’s analysis back in his face. Your candidate was the issue. Take responsibility. https://t.co/KHyOuUSrFS \n— Teachers for Bernie (@BerniesTeachers) November 9, 2016 \nAna Navarro, a Republican who recently endorsed Hillary Clinton, summed up the preposterous nature of the 2016 presidential election in this tweet: GOP nominated the only damn candidate who could lose to Hillary Clinton. Democrats nominated the only damn candidate who could lose to Trump \n— Ana Navarro (@ananavarro) November 9, 2016 \nPopular left-wing Facebook page The Other 98%, which was pro-Sanders during the primary, responded to Trump’s surge by simply posting a meme of Sanders’ face with the text “All this could’ve been avoided. Thanks for nothing, DNC!” The meme has been shared almost 15,000 times in less than an hour: \nPosted by The Other 98% on Tuesday, November 8, 2016 \nWhile Bernie Sanders endorsed Hillary Clinton just before the Democratic National Convention in July, many of his supporters remained adamant in their refusal to support the DNC-anointed candidate, pointing to WikiLeaks’ revelations that top officials at the DNC had been working behind the scenes to tip the scales in Clinton’s favor by coordinating with media figures to circulate anti-Sanders narratives. \nRather than attribute a potential Trump presidency to the GOP nominee’s perceived popularity among voters, the closeness of this election could be credited to Hillary Clinton’s unfavorable ratings. According to RealClearPolitics, anywhere between 51 and 57 percent of voters had a negative opinion of the Democratic nominee. \nAs of 11 PM Eastern, Florida, Michigan, Pennsylvania, and Wisconsin remain too close to call. Clinton has 197 electoral votes to Trump’s 187. \n\nZach Cartwright is an activist and author from Richmond, Virginia. He enjoys writing about politics, government, and the media. Send him an email at [email protected]"}, 'label': {'0': 'FAKE', '1': 'FAKE', '2': 'REAL', '3': 'FAKE'}} <end_description>
594
1
1,488
594
69332886
<jupyter_start><jupyter_text>EfficientNet-PytTorch-3D # "EfficientNet-PyTorch-3D" * License and other information : https://github.com/shijianjian/EfficientNet-PyTorch-3D # Usage ```python import sys sys.path.append('../input/efficientnetpyttorch3d/EfficientNet-PyTorch-3D') ``` ```python from efficientnet_pytorch_3d import EfficientNet3D ``` **PLEASE UPVOTE** IF this dataset is helpful to you Kaggle dataset identifier: efficientnetpyttorch3d <jupyter_script># ## Use stacked images (3D) and Efficientnet3D model # Acknowledgements: # - https://www.kaggle.com/ihelon/brain-tumor-eda-with-animations-and-modeling # - https://www.kaggle.com/furcifer/torch-efficientnet3d-for-mri-no-train # - https://github.com/shijianjian/EfficientNet-PyTorch-3D # # # Use models with only one MRI type, then ensemble the 4 models # import os import sys import json import glob import random import collections import time import numpy as np import pandas as pd import pydicom import cv2 import matplotlib.pyplot as plt import seaborn as sns import torch from torch import nn from torch.utils import data as torch_data from sklearn import model_selection as sk_model_selection from torch.nn import functional as torch_functional import torch.nn.functional as F from sklearn.model_selection import StratifiedKFold from sklearn.metrics import roc_auc_score if os.path.exists("../input/rsna-miccai-brain-tumor-radiogenomic-classification"): data_directory = "../input/rsna-miccai-brain-tumor-radiogenomic-classification" pytorch3dpath = "../input/efficientnetpyttorch3d/EfficientNet-PyTorch-3D" else: data_directory = ( "/media/roland/data/kaggle/rsna-miccai-brain-tumor-radiogenomic-classification" ) pytorch3dpath = "EfficientNet-PyTorch-3D" mri_types = ["FLAIR", "T1w", "T1wCE", "T2w"] SIZE = 256 NUM_IMAGES = 64 sys.path.append(pytorch3dpath) from efficientnet_pytorch_3d import EfficientNet3D # ## Functions to load images def load_dicom_image(path, img_size=SIZE): dicom = pydicom.read_file(path) data = dicom.pixel_array if np.min(data) == np.max(data): data = np.zeros((img_size, img_size)) return data data = data - np.min(data) if np.max(data) != 0: data = data / np.max(data) # data = (data * 255).astype(np.uint8) data = cv2.resize(data, (img_size, img_size)) return data def load_dicom_images_3d( scan_id, num_imgs=NUM_IMAGES, img_size=SIZE, mri_type="FLAIR", split="train" ): files = sorted(glob.glob(f"{data_directory}/{split}/{scan_id}/{mri_type}/*.dcm")) middle = len(files) // 2 num_imgs2 = num_imgs // 2 p1 = max(0, middle - num_imgs2) p2 = min(len(files), middle + num_imgs2) img3d = np.stack([load_dicom_image(f) for f in files[p1:p2]]).T if img3d.shape[-1] < num_imgs: n_zero = np.zeros((img_size, img_size, num_imgs - img3d.shape[-1])) img3d = np.concatenate((img3d, n_zero), axis=-1) return np.expand_dims(img3d, 0) load_dicom_images_3d("00000").shape def set_seed(seed): random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True set_seed(42) # ## train / test splits train_df = pd.read_csv(f"{data_directory}/train_labels.csv") display(train_df) df_train, df_valid = sk_model_selection.train_test_split( train_df, test_size=0.2, random_state=42, stratify=train_df["MGMT_value"], ) df_train.tail() # ## Model and training classes class Dataset(torch_data.Dataset): def __init__( self, paths, targets=None, mri_type=None, label_smoothing=0.01, split="train" ): self.paths = paths self.targets = 
targets self.mri_type = mri_type self.label_smoothing = label_smoothing self.split = split def __len__(self): return len(self.paths) def __getitem__(self, index): scan_id = self.paths[index] if self.targets is None: data = load_dicom_images_3d( str(scan_id).zfill(5), mri_type=self.mri_type[index], split=self.split ) else: data = load_dicom_images_3d( str(scan_id).zfill(5), mri_type=self.mri_type[index], split="train" ) if self.targets is None: return {"X": torch.tensor(data).float(), "id": scan_id} else: y = torch.tensor( abs(self.targets[index] - self.label_smoothing), dtype=torch.float ) return {"X": torch.tensor(data).float(), "y": y} class Model(nn.Module): def __init__(self): super().__init__() self.net = EfficientNet3D.from_name( "efficientnet-b0", override_params={"num_classes": 2}, in_channels=1 ) n_features = self.net._fc.in_features self.net._fc = nn.Linear(in_features=n_features, out_features=1, bias=True) def forward(self, x): out = self.net(x) return out class Trainer: def __init__(self, model, device, optimizer, criterion): self.model = model self.device = device self.optimizer = optimizer self.criterion = criterion self.best_valid_score = np.inf self.n_patience = 0 self.lastmodel = None def fit(self, epochs, train_loader, valid_loader, save_path, patience): for n_epoch in range(1, epochs + 1): self.info_message("EPOCH: {}", n_epoch) train_loss, train_time = self.train_epoch(train_loader) valid_loss, valid_auc, valid_time = self.valid_epoch(valid_loader) self.info_message( "[Epoch Train: {}] loss: {:.4f}, time: {:.2f} s ", n_epoch, train_loss, train_time, ) self.info_message( "[Epoch Valid: {}] loss: {:.4f}, auc: {:.4f}, time: {:.2f} s", n_epoch, valid_loss, valid_auc, valid_time, ) # if True: # if self.best_valid_score < valid_auc: if self.best_valid_score > valid_loss: self.save_model(n_epoch, save_path, valid_loss, valid_auc) self.info_message( "auc improved from {:.4f} to {:.4f}. 
Saved model to '{}'", self.best_valid_score, valid_loss, self.lastmodel, ) self.best_valid_score = valid_loss self.n_patience = 0 else: self.n_patience += 1 if self.n_patience >= patience: self.info_message( "\nValid auc didn't improve last {} epochs.", patience ) break def train_epoch(self, train_loader): self.model.train() t = time.time() sum_loss = 0 for step, batch in enumerate(train_loader, 1): X = batch["X"].to(self.device) targets = batch["y"].to(self.device) self.optimizer.zero_grad() outputs = self.model(X).squeeze(1) loss = self.criterion(outputs, targets) loss.backward() sum_loss += loss.detach().item() self.optimizer.step() message = "Train Step {}/{}, train_loss: {:.4f}" self.info_message( message, step, len(train_loader), sum_loss / step, end="\r" ) return sum_loss / len(train_loader), int(time.time() - t) def valid_epoch(self, valid_loader): self.model.eval() t = time.time() sum_loss = 0 y_all = [] outputs_all = [] for step, batch in enumerate(valid_loader, 1): with torch.no_grad(): X = batch["X"].to(self.device) targets = batch["y"].to(self.device) outputs = self.model(X).squeeze(1) loss = self.criterion(outputs, targets) sum_loss += loss.detach().item() y_all.extend(batch["y"].tolist()) outputs_all.extend(outputs.tolist()) message = "Valid Step {}/{}, valid_loss: {:.4f}" self.info_message( message, step, len(valid_loader), sum_loss / step, end="\r" ) y_all = [1 if x > 0.5 else 0 for x in y_all] auc = roc_auc_score(y_all, outputs_all) return sum_loss / len(valid_loader), auc, int(time.time() - t) def save_model(self, n_epoch, save_path, loss, auc): self.lastmodel = f"{save_path}-e{n_epoch}-loss{loss:.3f}-auc{auc:.3f}.pth" torch.save( { "model_state_dict": self.model.state_dict(), "optimizer_state_dict": self.optimizer.state_dict(), "best_valid_score": self.best_valid_score, "n_epoch": n_epoch, }, self.lastmodel, ) @staticmethod def info_message(message, *args, end="\n"): print(message.format(*args), end=end) # ## train models device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def train_mri_type(df_train, df_valid, mri_type): if mri_type == "all": train_list = [] valid_list = [] for mri_type in mri_types: df_train.loc[:, "MRI_Type"] = mri_type train_list.append(df_train.copy()) df_valid.loc[:, "MRI_Type"] = mri_type valid_list.append(df_valid.copy()) df_train = pd.concat(train_list) df_valid = pd.concat(valid_list) else: df_train.loc[:, "MRI_Type"] = mri_type df_valid.loc[:, "MRI_Type"] = mri_type print(df_train.shape, df_valid.shape) display(df_train.head()) train_data_retriever = Dataset( df_train["BraTS21ID"].values, df_train["MGMT_value"].values, df_train["MRI_Type"].values, ) valid_data_retriever = Dataset( df_valid["BraTS21ID"].values, df_valid["MGMT_value"].values, df_valid["MRI_Type"].values, ) train_loader = torch_data.DataLoader( train_data_retriever, batch_size=4, shuffle=True, num_workers=8, ) valid_loader = torch_data.DataLoader( valid_data_retriever, batch_size=4, shuffle=False, num_workers=8, ) model = Model() model.to(device) # checkpoint = torch.load("best-model-all-auc0.555.pth") # model.load_state_dict(checkpoint["model_state_dict"]) # print(model) optimizer = torch.optim.Adam(model.parameters(), lr=0.001) # optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) criterion = torch_functional.binary_cross_entropy_with_logits trainer = Trainer(model, device, optimizer, criterion) history = trainer.fit( 10, train_loader, valid_loader, f"{mri_type}", 10, ) return trainer.lastmodel modelfiles = None if not modelfiles: 
modelfiles = [train_mri_type(df_train, df_valid, m) for m in mri_types] print(modelfiles) # ## Predict function def predict(modelfile, df, mri_type, split): print("Predict:", modelfile, mri_type, df.shape) df.loc[:, "MRI_Type"] = mri_type data_retriever = Dataset( df.index.values, mri_type=df["MRI_Type"].values, split=split ) data_loader = torch_data.DataLoader( data_retriever, batch_size=4, shuffle=False, num_workers=8, ) model = Model() model.to(device) checkpoint = torch.load(modelfile) model.load_state_dict(checkpoint["model_state_dict"]) model.eval() y_pred = [] ids = [] for e, batch in enumerate(data_loader, 1): print(f"{e}/{len(data_loader)}", end="\r") with torch.no_grad(): tmp_pred = ( torch.sigmoid(model(batch["X"].to(device))).cpu().numpy().squeeze() ) if tmp_pred.size == 1: y_pred.append(tmp_pred) else: y_pred.extend(tmp_pred.tolist()) ids.extend(batch["id"].numpy().tolist()) preddf = pd.DataFrame({"BraTS21ID": ids, "MGMT_value": y_pred}) preddf = preddf.set_index("BraTS21ID") return preddf # ## Ensemble for validation df_valid = df_valid.set_index("BraTS21ID") df_valid["MGMT_pred"] = 0 for m, mtype in zip(modelfiles, mri_types): pred = predict(m, df_valid, mtype, "train") df_valid["MGMT_pred"] += pred["MGMT_value"] df_valid["MGMT_pred"] /= len(modelfiles) auc = roc_auc_score(df_valid["MGMT_value"], df_valid["MGMT_pred"]) print(f"Validation ensemble AUC: {auc:.4f}") sns.displot(df_valid["MGMT_pred"]) # ## Ensemble for submission submission = pd.read_csv( f"{data_directory}/sample_submission.csv", index_col="BraTS21ID" ) submission["MGMT_value"] = 0 for m, mtype in zip(modelfiles, mri_types): pred = predict(m, submission, mtype, split="test") submission["MGMT_value"] += pred["MGMT_value"] submission["MGMT_value"] /= len(modelfiles) submission["MGMT_value"].to_csv("submission.csv") submission sns.displot(submission["MGMT_value"])
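The submission above averages the four per-MRI-type models with equal weights. One possible variation, sketched below as an assumption-laden illustration rather than part of the original notebook, is to weight each model by its own validation AUC; it assumes `modelfiles`, `mri_types`, the already-indexed `df_valid`, `predict`, and `roc_auc_score` exactly as defined in the cells above, and it re-runs the per-type validation predictions.

# Sketch only: AUC-weighted ensemble of the per-MRI-type models.
# Assumes modelfiles, mri_types, df_valid (indexed by BraTS21ID), predict()
# and roc_auc_score from the cells above.
import numpy as np

per_type_preds, weights = {}, {}
for m, mtype in zip(modelfiles, mri_types):
    pred = predict(m, df_valid, mtype, "train")["MGMT_value"]
    per_type_preds[mtype] = pred
    weights[mtype] = roc_auc_score(df_valid["MGMT_value"], pred)

w = np.array([weights[t] for t in mri_types])
w = w / w.sum()  # normalise so the weights sum to 1

weighted = sum(w_i * per_type_preds[t] for w_i, t in zip(w, mri_types))
print("weighted-ensemble AUC:", roc_auc_score(df_valid["MGMT_value"], weighted))

Because the weights are chosen on the same validation split that the AUC is then reported on, this estimate is optimistic; a separate holdout or cross-validation would give a fairer comparison with the simple average used above.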
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332886.ipynb
efficientnetpyttorch3d
hihunjin
[{"Id": 69332886, "ScriptId": 18797520, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 692339, "CreationDate": "07/29/2021 16:51:03", "VersionNumber": 10.0, "Title": "Efficientnet3D with one MRI type", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 422.0, "LinesInsertedFromPrevious": 9.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 413.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 18}]
[{"Id": 92371396, "KernelVersionId": 69332886, "SourceDatasetVersionId": 2423144}]
[{"Id": 2423144, "DatasetId": 1466252, "DatasourceVersionId": 2465335, "CreatorUserId": 3746632, "LicenseName": "Unknown", "CreationDate": "07/14/2021 01:53:53", "VersionNumber": 1.0, "Title": "EfficientNet-PytTorch-3D", "Slug": "efficientnetpyttorch3d", "Subtitle": NaN, "Description": "# \"EfficientNet-PyTorch-3D\"\n\n* License and other information : https://github.com/shijianjian/EfficientNet-PyTorch-3D\n\n# Usage\n\n```python\nimport sys\nsys.path.append('../input/efficientnetpyttorch3d/EfficientNet-PyTorch-3D')\n```\n\n```python\nfrom efficientnet_pytorch_3d import EfficientNet3D\n```\n\n**PLEASE UPVOTE** IF this dataset is helpful to you", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1466252, "CreatorUserId": 3746632, "OwnerUserId": 3746632.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2423144.0, "CurrentDatasourceVersionId": 2465335.0, "ForumId": 1485872, "Type": 2, "CreationDate": "07/14/2021 01:53:53", "LastActivityDate": "07/14/2021", "TotalViews": 4459, "TotalDownloads": 443, "TotalVotes": 22, "TotalKernels": 44}]
[{"Id": 3746632, "UserName": "hihunjin", "DisplayName": "hihunjin", "RegisterDate": "09/22/2019", "PerformanceTier": 2}]
false
0
3,903
18
4,051
3,903
69332747
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session from sklearn import model_selection, preprocessing from sklearn.model_selection import cross_val_predict, cross_val_score, cross_validate from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import scipy.stats as stats import seaborn as sns data_train = pd.read_csv( "../input/house-prices-advanced-regression-techniques/train.csv" ) data_test = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv") print(data_train.shape) print(data_test.shape) data_train.head() data_train.info() data_train.describe() import numpy as np import scipy.stats as stats plt.hist( data_train["SalePrice"], bins=30, color="green", edgecolor="red", density=True, label="hist", ) kde = stats.kde.gaussian_kde(data_train["SalePrice"]) x = np.linspace(0, 750000, 100) y = kde(x) plt.plot(x, y, c="black", linewidth=3, label="k-density") plt.xlabel("Sale price") plt.legend() plt.show() data_test.head() data_test.info() data_test.describe() data_test = data_test.fillna(data_test.mean()) data_train = data_train.fillna(data_train.mean()) data_combined = pd.concat([data_train, data_test], axis=0) num_feats = data_combined.dtypes[data_train.dtypes != "object"].index scaler = preprocessing.StandardScaler().fit(data_combined[num_feats]) data_combined[num_feats] = pd.DataFrame(scaler.transform(data_train[num_feats])) data_combined = pd.get_dummies(data_combined).reset_index(drop=True) new_data_train = data_combined.iloc[: len(data_train), :] new_data_test = data_combined.iloc[len(data_train) :, :] # X_train = new_train_data.drop('SalePrice', axis=1) # y_train = np.log1p(new_train_data['SalePrice'].values.ravel()) X_test_set = new_data_test.drop("SalePrice", axis=1) target = new_data_train.SalePrice feats = new_data_train.drop("SalePrice", axis=1) X_train, X_test, y_train, y_test = train_test_split(feats, target, test_size=0.2) print(X_train.shape) print(X_test_set.shape) lr = LinearRegression() lr.fit(X_train, y_train) print("Coefficient de détermination du modèle :", lr.score(X_train, y_train)) print( "Coefficient de détermination obtenu par Cv :", cross_val_score(lr, X_train, y_train).mean(), ) lr.score(X_test, y_test) from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_regression sk = SelectKBest(f_regression, k=8) sk.fit(X=feats, y=target) feats.columns[sk.get_support()] sns.pairplot( new_data_train[ [ "OverallQual", "TotalBsmtSF", "1stFlrSF", "GrLivArea", "FullBath", "GarageCars", "GarageArea", "ExterQual_TA", "SalePrice", ] ] ) sk_train = sk.transform(X_train) sk_test = sk.transform(X_test) sklr = LinearRegression() sklr.fit(sk_train, y_train) print(sklr.score(sk_train, y_train)) print(sklr.score(sk_test, y_test)) from sklearn.feature_selection import SelectFromModel lr = LinearRegression() sfm = 
SelectFromModel(lr) sfm_train = sfm.fit_transform(X_train, y_train) sfm_test = sfm.transform(X_test) feats.columns[sfm.get_support()] sfmlr = LinearRegression() sfmlr.fit(sfm_train, y_train) print(sfmlr.score(sfm_train, y_train)) print(sfmlr.score(sfm_test, y_test)) from sklearn.metrics import mean_squared_error from sklearn.linear_model import RidgeCV ridge_reg = RidgeCV(alphas=(0.001, 0.01, 0.1, 0.3, 0.7, 1, 10, 50, 100)) ridge_reg.fit(X_train, y_train) print("alpha sélectionné par c-v :", ridge_reg.alpha_) print("score train :", ridge_reg.score(X_train, y_train)) print("score test :", ridge_reg.score(X_test, y_test)) ridge_pred_train = ridge_reg.predict(X_train) ridge_pred_test = ridge_reg.predict(X_test) print("mse train:", mean_squared_error(ridge_pred_train, y_train)) print("mse test:", mean_squared_error(ridge_pred_test, y_test)) from sklearn.linear_model import lasso_path mes_alphas = (0.001, 0.01, 0.02, 0.025, 0.05, 0.1, 0.25, 0.5, 0.8, 1.0) alpha_path, coefs_lasso, _ = lasso_path(X_train, y_train, alphas=mes_alphas) coefs_lasso.shape import matplotlib.cm as cm plt.figure(figsize=(10, 7)) for i in range(coefs_lasso.shape[0]): plt.plot(alpha_path, coefs_lasso[i, :], "--") plt.xlabel("Alpha") plt.ylabel("Coefficients") plt.title("Lasso path") plt.show() from sklearn.linear_model import LassoCV model_lasso = LassoCV(cv=10).fit(X_train, y_train) alphas = model_lasso.alphas_ plt.figure(figsize=(10, 8)) plt.plot(alphas, model_lasso.mse_path_, ":") plt.plot(alphas, model_lasso.mse_path_.mean(axis=1), "k", label="Moyenne", linewidth=2) plt.axvline(model_lasso.alpha_, linestyle="--", color="k", label="alpha: estimation CV") plt.legend() plt.xlabel("Alpha") plt.ylabel("Mean square error") plt.title("Mean square error pour chaque échantillon ") plt.show() pred_test = model_lasso.predict(X_test) print("score test:", model_lasso.score(X_test, y_test)) print("mse test:", mean_squared_error(pred_test, y_test)) coef = pd.Series(model_lasso.coef_, index=X_train.columns) print( "Lasso a gardé " + str(sum(coef != 0)) + " et a éliminé les " + str(sum(coef == 0)) + " autres variables" ) imp_coef = pd.concat( [coef.sort_values(ascending=False).head(10), coef.sort_values().head(10)] ) imp_coef plt.figure(figsize=(10, 8)) imp_coef.plot(kind="barh") plt.title("Coefficients in the Lasso Model") from sklearn.linear_model import ElasticNetCV model_en = ElasticNetCV( cv=8, l1_ratio=(0.1, 0.25, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.99), alphas=(0.001, 0.01, 0.02, 0.025, 0.05, 0.1, 0.25, 0.5, 0.8, 1.0), ) model_en.fit(X_train, y_train) alphas = model_en.alphas_ plt.figure(figsize=(10, 10)) for i in range(model_en.mse_path_.shape[0]): plt.plot( alphas, model_en.mse_path_[i, :, :].mean(axis=1), label="Moyenne pour l1_ratio= %.2f" % model_en.l1_ratio[i], linewidth=2, ) plt.legend() plt.xlabel("Alpha") plt.ylabel("Mean squared error") plt.title("Mean squared error pour chaque $\lambda$") plt.show() pred_train = model_en.predict(X_train) pred_test = model_en.predict(X_test) print(np.sqrt(mean_squared_error(y_train, pred_train))) print(np.sqrt(mean_squared_error(y_test, pred_test))) print("score train:", model_en.score(X_train, y_train)) print("score test:", model_en.score(X_test, y_test)) moy = scaler.mean_[-1] ec = scaler.scale_[-1] print("moyenne :", moy) print("ecart-type", ec) pd.DataFrame( { "prices_given": (y_test * ec) + moy, "prices_predited": np.round((pred_test * ec) + moy), }, index=X_test.index, ).head(10) y_test_set = model_en.predict(X_test_set) new_data_test.head() submission_df = pd.DataFrame( {"Id": 
data_test["Id"], "SalePrice": np.round((y_test_set * ec) + moy)} ) submission_df.head()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332747.ipynb
null
null
[{"Id": 69332747, "ScriptId": 18923690, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5048266, "CreationDate": "07/29/2021 16:49:07", "VersionNumber": 2.0, "Title": "Housepricing advanced regression", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 283.0, "LinesInsertedFromPrevious": 47.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 236.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,756
0
2,756
2,756
69332026
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import pandas as pd import numpy as np import calendar import xgboost import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import ( train_test_split, GridSearchCV, LeaveOneOut, LeaveOneGroupOut, StratifiedKFold, ) from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor from sklearn import preprocessing from sklearn.preprocessing import StandardScaler, RobustScaler from sklearn.multioutput import MultiOutputRegressor from sklearn.linear_model import ( LinearRegression, MultiTaskElasticNetCV, MultiTaskLassoCV, ) from sklearn.metrics import mean_squared_log_error pd.set_option("display.max_colwidth", None) import warnings warnings.filterwarnings("ignore") from catboost import CatBoostRegressor train = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/train.csv") test = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/test.csv") print("Shape of train: ", train.shape) print("Shape of test: ", test.shape) # Lets understand the terminology first: # - Humidity: Is the amount of water or moisture present in the air in form of water vapour # - Relative Humidity: Is the percentage of moisture against the highest possible level of moisture in the air at specific temperature # - Absolute Humidity: Is the measure of moisture in the air regardless of temperature and expressed as grams of moisture per cubic meter of air(g/m3) # Making a copy of train and test data train_copy = train.copy() test_copy = test.copy() # Divide 'relative_humidity' column by 100, to convert from percentage train["relative_humidity"] = train["relative_humidity"] / 100 test["relative_humidity"] = test["relative_humidity"] / 100 # Check for outliers plt.figure(figsize=(25, 20)) plt.subplot(4, 4, 1) sns.boxplot(train["sensor_1"]) plt.subplot(4, 4, 2) sns.boxplot(train["sensor_2"]) plt.subplot(4, 4, 3) sns.boxplot(train["sensor_3"]) plt.subplot(4, 4, 4) sns.boxplot(train["sensor_4"]) plt.subplot(4, 4, 5) sns.boxplot(train["sensor_5"]) plt.subplot(4, 4, 6) sns.boxplot(train["relative_humidity"]) plt.subplot(4, 4, 7) sns.boxplot(train["absolute_humidity"]) plt.subplot(4, 4, 8) sns.boxplot(train["deg_C"]) # Lets look at the records in detail display(train[train["sensor_2"] > 2250]) print() display(train[train["sensor_3"] > 2400]) print() display(train[train["sensor_4"] > 2800]) print() display(train[train["absolute_humidity"] > 2.2]) # Drop indexes 6160, 5520, 4462, 6586, 6587, 6589, 6590, 6592 index = [6160, 5520, 4462, 6586, 6587, 6589, 6590, 6592] train = train.drop(labels=index, axis=0) # Check distribution of target variables plt.figure(figsize=(13, 10)) plt.subplot(2, 2, 1) sns.histplot(train["target_benzene"], kde=True) plt.subplot(2, 2, 2) sns.histplot(train["target_carbon_monoxide"], kde=True) plt.subplot(2, 2, 3) sns.histplot(train["target_nitrogen_oxides"], kde=True) # Convert 
distribution plt.figure(figsize=(13, 10)) plt.subplot(2, 2, 1) sns.histplot(np.sqrt(train["target_benzene"]), kde=True, color="Green") plt.subplot(2, 2, 2) sns.histplot(np.sqrt(train["target_carbon_monoxide"]), kde=True, color="Green") plt.subplot(2, 2, 3) sns.histplot(np.log(train["target_nitrogen_oxides"]), kde=True, color="Green") train["target_benzene"] = np.sqrt(train["target_benzene"]) train["target_carbon_monoxide"] = np.sqrt(train["target_carbon_monoxide"]) train["target_nitrogen_oxides"] = np.log(train["target_nitrogen_oxides"]) # Assign target variable target = pd.DataFrame( train[["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"]] ) train = train.drop( ["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"], axis=1 ) print("Shape of train: ", train.shape) print("Shape of test: ", test.shape) data = pd.concat([train, test]) data.shape # Work with date column data["date_time"] = pd.to_datetime(data["date_time"], format="%Y-%m-%d %H:%M:%S") data["hour"] = data["date_time"].dt.hour data["working_hours"] = data["hour"].isin(np.arange(8, 21, 1)).astype("int") data["is_weekend"] = (data["date_time"].dt.dayofweek >= 5).astype("int") # data['hr'] = data.date_time.dt.hour*60 +data.date_time.dt.minute # data['satday'] = (data.date_time.dt.weekday == 5).astype('int') data["Day_of_Week"] = data["date_time"].apply(lambda x: calendar.day_name[x.weekday()]) data["SMC"] = (data["absolute_humidity"] * 100) / data["relative_humidity"] data.head(3) data = data.drop(["date_time", "hour"], axis=1) data_num_cols = data._get_numeric_data().columns data_num_cols data_cat_cols = data.columns.difference(data_num_cols) data_cat_cols # Separating both numeric and categorical data from set data_num_data = data.loc[:, data_num_cols] data_cat_data = data.loc[:, data_cat_cols] print("Shape of num data:", data_num_data.shape) print("Shape of cat data:", data_cat_data.shape) s_scaler = RobustScaler() data_num_data_s = s_scaler.fit_transform(data_num_data) data_num_data_s = pd.DataFrame(data_num_data_s, columns=data_num_cols) data_cat_data = pd.get_dummies(data_cat_data) data_cat_data.head() data_num_data_s.reset_index(drop=True, inplace=True) data_cat_data.reset_index(drop=True, inplace=True) data_new = pd.concat([data_num_data_s, data_cat_data], axis=1) train_new = data_new.loc[:7102,] test_new = data_new.loc[7103:,] print("Shape of train data:", train_new.shape) print("Shape of test data:", test_new.shape) from sklearn.model_selection import train_test_split trainx, valx, trainy, valy = train_test_split( train_new, target, test_size=0.25, random_state=1234 ) # print(cust_data.shape) print(trainx.shape) print(valx.shape) # ## XGBoost xgb = xgboost.XGBRFRegressor() xgb_m = MultiOutputRegressor(xgb) xgb_m.fit(trainx, trainy) # Predecting values on train and validation sets pred_train_xgb = xgb_m.predict(trainx) pred_val_xgb = xgb_m.predict(valx) RMSLE_train_xgb = np.sqrt(mean_squared_log_error(trainy, abs(pred_train_xgb))) RMSLE_val_xgb = np.sqrt(mean_squared_log_error(valy, abs(pred_val_xgb))) RMSLE_val_xgb pred_test_xgb = xgb_m.predict(test_new) pred_test_xgb = pd.DataFrame( pred_test_xgb, columns=["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"], ) pred_test_xgb.head() submission_xgb = pd.DataFrame(test_copy[["date_time"]]) submission_xgb["target_carbon_monoxide"] = np.square( pred_test_xgb["target_carbon_monoxide"] ) submission_xgb["target_benzene"] = np.square(pred_test_xgb["target_benzene"]) submission_xgb["target_nitrogen_oxides"] = ( 
pred_test_xgb["target_nitrogen_oxides"] * pred_test_xgb["target_nitrogen_oxides"] * pred_test_xgb["target_nitrogen_oxides"] ) submission_xgb.head() # ## CatBoost cat = CatBoostRegressor() cat_m = MultiOutputRegressor(cat) cat_m.fit(trainx, trainy) # Predecting values on train and validation sets pred_train_cat = cat_m.predict(trainx) pred_val_cat = cat_m.predict(valx) RMSLE_train_cat = np.sqrt(mean_squared_log_error(trainy, abs(pred_train_cat))) RMSLE_val_cat = np.sqrt(mean_squared_log_error(valy, abs(pred_val_cat))) RMSLE_val_cat pred_test_cat = cat_m.predict(test_new) pred_test_cat = pd.DataFrame( pred_test_cat, columns=["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"], ) pred_test_cat.head() submission_cat = pd.DataFrame(test_copy[["date_time"]]) submission_cat["target_carbon_monoxide"] = np.square( pred_test_cat["target_carbon_monoxide"] ) submission_cat["target_benzene"] = np.square(pred_test_cat["target_benzene"]) submission_cat["target_nitrogen_oxides"] = np.exp( pred_test_cat["target_nitrogen_oxides"] ) submission_cat.head() # ## AdaBoost Regressor ada = AdaBoostRegressor() ada_m = MultiOutputRegressor(cat) ada_m.fit(trainx, trainy) # Predecting values on train and validation sets pred_train_ada = ada_m.predict(trainx) pred_val_ada = ada_m.predict(valx) RMSLE_train_ada = np.sqrt(mean_squared_log_error(trainy, abs(pred_train_ada))) RMSLE_val_ada = np.sqrt(mean_squared_log_error(valy, abs(pred_val_ada))) RMSLE_val_ada pred_test_ada = ada_m.predict(test_new) pred_test_ada = pd.DataFrame( pred_test_ada, columns=["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"], ) pred_test_ada.head() submission_ada = pd.DataFrame(test_copy[["date_time"]]) submission_ada["target_carbon_monoxide"] = np.square( pred_test_ada["target_carbon_monoxide"] ) submission_ada["target_benzene"] = np.square(pred_test_ada["target_benzene"]) submission_ada["target_nitrogen_oxides"] = np.exp( pred_test_ada["target_nitrogen_oxides"] ) submission_ada.head() # ## RandomForest Regressor rfr = RandomForestRegressor() rfr.fit(X=trainx, y=trainy) # Predecting values on train and validation sets pred_train_rfr = rfr.predict(trainx) pred_val_rfr = rfr.predict(valx) RMSLE_train_rfr = np.sqrt(mean_squared_log_error(trainy, abs(pred_train_rfr))) RMSLE_val_rfr = np.sqrt(mean_squared_log_error(valy, abs(pred_val_rfr))) RMSLE_val_rfr pred_test_rfr = rfr.predict(test_new) pred_test_rfr = pd.DataFrame( pred_test_rfr, columns=["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"], ) pred_test_rfr.head() submission_rfr = pd.DataFrame(test_copy[["date_time"]]) submission_rfr["target_carbon_monoxide"] = np.square( pred_test_rfr["target_carbon_monoxide"] ) submission_rfr["target_benzene"] = np.square(pred_test_rfr["target_benzene"]) submission_rfr["target_nitrogen_oxides"] = np.exp( pred_test_rfr["target_nitrogen_oxides"] ) submission_rfr.head() # ## LassoCV lasso_model = MultiTaskLassoCV() lasso_model.fit(trainx, trainy) pred_train_lso = lasso_model.predict(trainx) pred_val_lso = lasso_model.predict(valx) RMSLE_train_lso = np.sqrt(mean_squared_log_error(trainy, abs(pred_train_lso))) RMSLE_val_lso = np.sqrt(mean_squared_log_error(valy, abs(pred_val_lso))) RMSLE_val_lso pred_test_lso = lasso_model.predict(test_new) pred_test_lso = pd.DataFrame( pred_test_rfr, columns=["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"], ) pred_test_lso.head() submission_lso = pd.DataFrame(test_copy[["date_time"]]) submission_lso["target_carbon_monoxide"] = np.square( 
pred_test_lso["target_carbon_monoxide"] ) submission_lso["target_benzene"] = np.square(pred_test_lso["target_benzene"]) submission_lso["target_nitrogen_oxides"] = np.exp( pred_test_lso["target_nitrogen_oxides"] ) submission_lso.head() # ## Submission Submission = pd.DataFrame(test_copy[["date_time"]]) Submission["target_carbon_monoxide"] = ( submission_ada["target_carbon_monoxide"] + submission_cat["target_carbon_monoxide"] + submission_lso["target_carbon_monoxide"] + submission_rfr["target_carbon_monoxide"] + submission_xgb["target_carbon_monoxide"] ) / 5 Submission["target_benzene"] = ( submission_ada["target_benzene"] + submission_cat["target_benzene"] + submission_lso["target_benzene"] + submission_rfr["target_benzene"] + submission_xgb["target_benzene"] ) / 5 Submission["target_nitrogen_oxides"] = ( submission_ada["target_nitrogen_oxides"] + submission_cat["target_nitrogen_oxides"] + submission_lso["target_nitrogen_oxides"] + submission_rfr["target_nitrogen_oxides"] + submission_xgb["target_nitrogen_oxides"] ) / 5 Submission.head() Submission.to_csv("Submission.csv", index="False")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332026.ipynb
null
null
[{"Id": 69332026, "ScriptId": 18928545, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3485821, "CreationDate": "07/29/2021 16:37:48", "VersionNumber": 1.0, "Title": "Five Regression Models", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 351.0, "LinesInsertedFromPrevious": 351.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
4,362
0
4,362
4,362
69332853
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/train.csv") test = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/test.csv") submit = pd.read_csv( "/kaggle/input/tabular-playground-series-jul-2021/sample_submission.csv" ) df.head() features = [feature for feature in df.columns if df[feature].dtypes != "O"] features import matplotlib.pyplot as plt for column in features: plt.plot(df[column]) plt.title(column) plt.show() import datetime df["date_time"] = pd.to_datetime(df["date_time"]) df["day"] = df["date_time"].map(lambda x: x.day) df["month"] = df["date_time"].map(lambda x: x.month) df["year"] = df["date_time"].map(lambda x: x.year) df["hour"] = df["date_time"].map(lambda x: x.hour) df.head() import seaborn as sns sns.heatmap(df.corr()) target = df.iloc[:, 9:12] target df.head() # X_features = [feature for feature in df.columns if 'target' not in feature] X_features = [feature for feature in df.columns if "target" not in feature] X = df[X_features] X.head() X = X.drop("date_time", axis=1) X.head() y_co = df["target_carbon_monoxide"] y_be = df["target_benzene"] y_no = df["target_nitrogen_oxides"] # Creating Train test split from sklearn.model_selection import train_test_split X_train, X_test, yco_train, yco_test = train_test_split( X, y_co, test_size=0.3, random_state=0 ) import datetime test["date_time"] = pd.to_datetime(test["date_time"]) test["day"] = test["date_time"].map(lambda x: x.day) test["month"] = test["date_time"].map(lambda x: x.month) test["year"] = test["date_time"].map(lambda x: x.year) test["hour"] = test["date_time"].map(lambda x: x.hour) test.head() from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) test = test.drop("date_time", axis=1) test = sc.transform(test) import pandas as pd import numpy as np import tensorflow as tf from tensorflow import keras import keras_tuner as kt def model_builder(hp): model = keras.Sequential() for i in range(hp.Int("num_layers", 2, 20)): model.add( keras.layers.Dense( units=hp.Int("units_" + str(i), min_value=32, max_value=512, step=32), activation="relu", ) ) # Tune the number of units in the first Dense layer # Choose an optimal value between 32-512 model.add(keras.layers.Dense(1, activation="linear")) # Tune the learning rate for the optimizer # Choose an optimal value from 0.01, 0.001, or 0.0001 hp_learning_rate = hp.Choice("learning_rate", values=[1e-2, 1e-3, 1e-4]) model.compile( optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate), loss="mean_squared_error", metrics=["mean_squared_error"], ) return model from keras_tuner import RandomSearch tuner = RandomSearch( model_builder, objective="val_mean_squared_error", max_trials=5, executions_per_trial=5, directory="co", project_name="Co", ) tuner.search_space_summary 
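# Note (added; not in the original notebook): `search_space_summary` is a method on
# the tuner object, so the bare attribute access above only evaluates to the bound
# method without printing anything. Calling it, as sketched below, lists the tunable
# hyperparameters before the search runs.
tuner.search_space_summary()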
tuner.search(X_train, yco_train, epochs=50, validation_data=(X_test, yco_test)) tuner.results_summary() best_model = tuner.get_best_models(num_models=1)[0] loss, mse = best_model.evaluate(X_test, yco_test) # Predict values for CO test Y_CO = best_model.predict(X_test) Y_CO.shape yco_test.shape # Calculating rmse on test set from sklearn.metrics import mean_squared_error from math import sqrt rms_co = sqrt(mean_squared_error(yco_test, Y_CO)) rms_co # 1.38 when 10epochs, 5 trails, 3 exe # Predicting actual test X using the best model co_sub = best_model.predict(test) co_sub # Creating a dir for Benzene in the similar way from keras_tuner import RandomSearch tuner_be = RandomSearch( model_builder, objective="val_mean_squared_error", max_trials=5, executions_per_trial=5, directory="be", project_name="Be", ) # Creating Train test split from sklearn.model_selection import train_test_split X_train, X_test, ybe_train, ybe_test = train_test_split( X, y_be, test_size=0.3, random_state=4 ) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) tuner_be.search(X_train, ybe_train, epochs=50, validation_data=(X_test, ybe_test)) tuner_be.results_summary() tuner_be.results_summary() best_model_be = tuner_be.get_best_models(num_models=1)[0] loss, mse = best_model_be.evaluate(X_test, ybe_test) # Predicted value for benzene Y_benzene = best_model_be.predict(X_test) # Calculating Accuracy rms_be = sqrt(mean_squared_error(ybe_test, Y_benzene)) rms_be # 1.12 when 50epochs, 5 trails, 5 exe be_sub = best_model_be.predict(test) be_sub # Creating a dir for Nitrogen in the similar way from keras_tuner import RandomSearch tuner_no = RandomSearch( model_builder, objective="val_mean_squared_error", max_trials=5, executions_per_trial=5, directory="no", project_name="No", ) # Creating Train test split from sklearn.model_selection import train_test_split X_train, X_test, yno_train, yno_test = train_test_split( X, y_no, test_size=0.3, random_state=0 ) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) tuner_no.search(X_train, yno_train, epochs=50, validation_data=(X_test, yno_test)) tuner_no.results_summary() best_model_no = tuner_no.get_best_models(num_models=1)[0] loss, mse = best_model_no.evaluate(X_test, yno_test) # Predicted value for NO Y_NO = best_model_no.predict(X_test) # Calculating Accuracy rms_no = sqrt(mean_squared_error(yno_test, Y_NO)) rms_no # 83.93 when 10epochs, 5 trails, 3 exe # Predicting actual test X for Nitrogen no_sub = best_model_no.predict(test) no_sub # ## Creating Submission submit.head() # Coverting the predictions to a dataframe y_sub_co = pd.DataFrame(co_sub, columns=["target_carbon_monoxide"]) y_sub_co y_sub_benz = pd.DataFrame(be_sub, columns=["target_benzene"]) print(y_sub_benz) y_sub_NO = pd.DataFrame(no_sub, columns=["target_nitrogen_oxides"]) y_sub_NO sub = pd.concat([submit["date_time"], y_sub_co, y_sub_benz, y_sub_NO], axis=1) sub # Downloading the file sub.to_csv("submission.csv", index=False) print("Your submission was successfully saved!")
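# Hedged follow-up sketch (added; assumes the standard keras_tuner API and reuses the
# tuner names defined above): after each search finishes, the winning configuration
# can be read back for logging or retraining, instead of only keeping the best model
# object.
for name, t in [("CO", tuner), ("benzene", tuner_be), ("NOx", tuner_no)]:
    best_hp = t.get_best_hyperparameters(num_trials=1)[0]
    print(
        name,
        "num_layers:", best_hp.get("num_layers"),
        "learning_rate:", best_hp.get("learning_rate"),
    )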
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332853.ipynb
null
null
[{"Id": 69332853, "ScriptId": 18568554, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1946522, "CreationDate": "07/29/2021 16:50:33", "VersionNumber": 3.0, "Title": "TPS July-Keras Tuner", "EvaluationDate": "07/29/2021", "IsChange": false, "TotalLines": 239.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 239.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,371
0
2,371
2,371
69332031
<jupyter_start><jupyter_text>Waste Classification data <h1>WASTE CLASSIFICATION</h1> **PROBLEM**<br><br> Waste management is a big problem in our country. Most of the wastes end up in landfills. This leads to many issues like - Increase in landfills<br> - Eutrophication<br> - Consumption of toxic waste by animals<br> - Leachate<br> - Increase in toxins<br> - Land, water and air pollution<br><br> **APPROACH**<br><br> - Studied white papers on waste management<br> - Analysed the components of household waste<br> - Segregated into two classes (Organic and recyclable)<br> - Automated the process by using IOT and machine learning<br> - Reduce toxic waste ending in landfills<br><br> **IMPLEMENTATION**<br><br> Dataset is divided into train data (85%) and test data (15%)

 Training data - 22564 images
Test data - 2513 images Kaggle dataset identifier: waste-classification-data <jupyter_script># # In this notebook, we try to classify waste into Organic and Recyclable objects. The dataset we work on is divided into train data (85%) and test data (15%): 22564 training images and 2513 test images.
# In this notebook, we will have two models whose results we will compare.
# For the comparison to be fair, we train each model for the same number of epochs (100 here), even if a given model would need more or fewer epochs to converge.
# importing the libraries needed
import os
import tensorflow as tf

# Define model 1
model1 = tf.keras.Sequential(
    [
        tf.keras.layers.Conv2D(64, (3, 3), input_shape=(150, 150, 3)),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPool2D(2),
        tf.keras.layers.Conv2D(128, (3, 3)),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPool2D(2),
        tf.keras.layers.Conv2D(256, (3, 3)),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPool2D(2),
        tf.keras.layers.Conv2D(512, (3, 3)),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPool2D(2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation="relu"),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ]
)
model1.summary()
model1.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    loss="binary_crossentropy",
    metrics=["accuracy"],
)
train_datgen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1.0 / 255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode="nearest",
)
train_generator = train_datgen.flow_from_directory(
    "../input/waste-classification-data/DATASET/TRAIN",
    target_size=(150, 150),
    batch_size=32,
    class_mode="binary",
)
test_datgen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1.0 / 255,
)
test_generator = test_datgen.flow_from_directory(
    "../input/waste-classification-data/DATASET/TEST",
    target_size=(150, 150),
    batch_size=32,
    class_mode="binary",
)
# fit() accepts generators directly; fit_generator() is deprecated
history = model1.fit(
    train_generator, epochs=100, validation_data=test_generator
)
import matplotlib.pyplot as plt

acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs_range = range(len(acc))
plt.figure(figsize=(15, 15))
plt.subplot(2, 2, 1)
plt.plot(epochs_range, acc, label="Training Accuracy")
plt.plot(epochs_range, val_acc, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(2, 2, 2)
plt.plot(epochs_range, loss, label="Training Loss")
plt.plot(epochs_range, val_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
plt.show()
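# The curves above only show aggregate accuracy and loss; a per-class breakdown on
# the held-out set makes the planned model comparison more concrete. This is a
# minimal sketch reusing the notebook's own test_datgen and model1 and assuming the
# same directory layout; a fresh generator with shuffle=False is created so the
# predictions line up with the generator's label order.
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix

eval_generator = test_datgen.flow_from_directory(
    "../input/waste-classification-data/DATASET/TEST",
    target_size=(150, 150),
    batch_size=32,
    class_mode="binary",
    shuffle=False,
)
probs = model1.predict(eval_generator)     # sigmoid outputs in [0, 1]
preds = (probs.ravel() > 0.5).astype(int)  # threshold at 0.5
labels = eval_generator.classes            # ground-truth labels in file order

print(confusion_matrix(labels, preds))
print(classification_report(labels, preds, target_names=list(eval_generator.class_indices)))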
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332031.ipynb
waste-classification-data
techsash
[{"Id": 69332031, "ScriptId": 18903535, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6445211, "CreationDate": "07/29/2021 16:37:50", "VersionNumber": 2.0, "Title": "organicVsRecycableObjects", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 81.0, "LinesInsertedFromPrevious": 3.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 78.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92369248, "KernelVersionId": 69332031, "SourceDatasetVersionId": 497253}]
[{"Id": 497253, "DatasetId": 233210, "DatasourceVersionId": 513348, "CreatorUserId": 1274645, "LicenseName": "CC BY-SA 4.0", "CreationDate": "06/16/2019 03:24:52", "VersionNumber": 1.0, "Title": "Waste Classification data", "Slug": "waste-classification-data", "Subtitle": "This dataset contains 22500 images of organic and recyclable objects", "Description": "<h1>WASTE CLASSIFICATION</h1>\n\n**PROBLEM**<br><br>\nWaste management is a big problem in our country. Most of the wastes end up in landfills. This leads to many issues like\n\n- Increase in landfills<br>\n- Eutrophication<br>\n- Consumption of toxic waste by animals<br>\n- Leachate<br>\n- Increase in toxins<br>\n- Land, water and air pollution<br><br>\n\n**APPROACH**<br><br>\n- Studied white papers on waste management<br>\n- Analysed the components of household waste<br>\n- Segregated into two classes (Organic and recyclable)<br>\n- Automated the process by using IOT and machine learning<br>\n- Reduce toxic waste ending in landfills<br><br>\n\n**IMPLEMENTATION**<br><br>\nDataset is divided into train data (85%) and test data (15%)\u2028\u2028\n\nTraining data - 22564 images\u2028 Test data - 2513 images", "VersionNotes": "Initial release", "TotalCompressedBytes": 236162097.0, "TotalUncompressedBytes": 236162097.0}]
[{"Id": 233210, "CreatorUserId": 1274645, "OwnerUserId": 1274645.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 497253.0, "CurrentDatasourceVersionId": 513348.0, "ForumId": 244370, "Type": 2, "CreationDate": "06/16/2019 03:24:52", "LastActivityDate": "06/16/2019", "TotalViews": 193818, "TotalDownloads": 20104, "TotalVotes": 353, "TotalKernels": 83}]
[{"Id": 1274645, "UserName": "techsash", "DisplayName": "Sashaank Sekar", "RegisterDate": "09/18/2017", "PerformanceTier": 1}]
false
0
938
0
1,224
938
69332481
<jupyter_start><jupyter_text>roberta-base Kaggle dataset identifier: roberta-base <jupyter_script>import transformers from transformers import ( AutoTokenizer, AutoModel, AutoModelForMaskedLM, Trainer, TrainingArguments, DataCollatorForLanguageModeling, ) import pandas as pd import numpy as np import torch import torch.nn as nn from torch.utils.data.dataset import Dataset from sklearn import model_selection from sklearn import metrics from typing import Dict from transformers.tokenization_utils import PreTrainedTokenizer from tqdm import tqdm WARMUP_STEPS = 0 LEARNING_RATE = 5e-5 WEIGHT_DECAY = 0 EVAL_STEPS = 200 TRAIN_BATCH_SIZE = 16 VALID_BATCH_SIZE = 16 EPOCHS = 5 ROBERTA_MODEL = "../input/roberta-base" TRAINING_FILE = "../input/commonlitreadabilityprize/train.csv" TEST_FILE = "../input/commonlitreadabilityprize/test.csv" TEXT_PATH = "Data/text.txt" TOKENIZER = transformers.AutoTokenizer.from_pretrained( ROBERTA_MODEL, do_lower_case=True ) MODEL = transformers.AutoModelForMaskedLM.from_pretrained(ROBERTA_MODEL) class LineByLineTextDataset(Dataset): def __init__(self, data, tokenizer: PreTrainedTokenizer, block_size: int): data = data["excerpt"] lines = [line for line in data if (len(line) > 0 and not line.isspace())] batch_encoding = tokenizer( lines, add_special_tokens=True, truncation=True, max_length=block_size ) self.examples = batch_encoding["input_ids"] self.examples = [ {"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples ] def __len__(self): return len(self.examples) def __getitem__(self, i) -> Dict[str, torch.tensor]: return self.examples[i] train_data = pd.read_csv(TRAINING_FILE) train_dataset = LineByLineTextDataset(train_data, tokenizer=TOKENIZER, block_size=256) valid_dataset = LineByLineTextDataset(train_data, tokenizer=TOKENIZER, block_size=256) data_collator = DataCollatorForLanguageModeling( tokenizer=TOKENIZER, mlm=True, mlm_probability=0.15 ) training_args = TrainingArguments( output_dir="./results", overwrite_output_dir=True, num_train_epochs=EPOCHS, per_device_train_batch_size=TRAIN_BATCH_SIZE, per_device_eval_batch_size=VALID_BATCH_SIZE, evaluation_strategy="steps", save_total_limit=2, eval_steps=EVAL_STEPS, metric_for_best_model="eval_loss", greater_is_better=False, load_best_model_at_end=True, prediction_loss_only=True, warmup_steps=WARMUP_STEPS, weight_decay=WEIGHT_DECAY, report_to="none", logging_dir="./logs", ) trainer = Trainer( model=MODEL, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=valid_dataset, ) trainer.train() trainer.save_model("Models/clrp_roberta-pretrained") del trainer torch.cuda.empty_cache() import torch from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from keras.preprocessing.sequence import pad_sequences from sklearn.model_selection import train_test_split from transformers import ( RobertaForSequenceClassification, AdamW, get_linear_schedule_with_warmup, ) from tqdm import trange import pandas as pd import io import numpy as np import matplotlib.pyplot as plt MODEL_PATH = "../input/roberta-base" MAX_LEN = 128 EPOCHS = 4 seed_val = 42 # del MODEL # torch.cuda.empty_cache() MODEL = RobertaForSequenceClassification.from_pretrained(MODEL_PATH, num_labels=1) # Tell pytorch to run this model on the GPU. 
MODEL.cuda() TOKENIZER = transformers.AutoTokenizer.from_pretrained( ROBERTA_MODEL, do_lower_case=True ) device = torch.device("cuda") sentences = train_data.excerpt.values labels = train_data.target.values # Extract input_ids and attention_mask from the tokens input_ids = [] attention_masks = [] for sent in sentences: encoded_dict = TOKENIZER.encode_plus( sent, # Sentence to encode. add_special_tokens=True, # Add '[CLS]' and '[SEP]' max_length=200, # Pad & truncate all sentences. pad_to_max_length=True, return_attention_mask=True, # Construct attn. masks. return_tensors="pt", # Return pytorch tensors. ) # Add the encoded sentence to the list. input_ids.append(encoded_dict["input_ids"]) # And its attention mask (simply differentiates padding from non-padding). attention_masks.append(encoded_dict["attention_mask"]) # Convert the lists into tensors. input_ids = torch.cat(input_ids, dim=0) attention_masks = torch.cat(attention_masks, dim=0) labels = torch.tensor(labels, dtype=torch.float) # Print sentence 0, now as a list of IDs. print("Original: ", sentences[0]) print("Token IDs:", input_ids[0]) from torch.utils.data import TensorDataset, random_split # Combine the training inputs into a TensorDataset. dataset = TensorDataset(input_ids, attention_masks, labels) # Create a 90-10 train-validation split. # Calculate the number of samples to include in each set. train_size = int(0.9 * len(dataset)) val_size = len(dataset) - train_size # Divide the dataset by randomly selecting samples. train_dataset, val_dataset = random_split(dataset, [train_size, val_size]) print("{:>5,} training samples".format(train_size)) print("{:>5,} validation samples".format(val_size)) from torch.utils.data import DataLoader, RandomSampler, SequentialSampler # The DataLoader needs to know our batch size for training, so we specify it # here. For fine-tuning BERT on a specific task, the authors recommend a batch # size of 16 or 32. batch_size = 32 # Create the DataLoaders for our training and validation sets. # We'll take training samples in random order. train_dataloader = DataLoader( train_dataset, # The training samples. sampler=RandomSampler(train_dataset), # Select batches randomly batch_size=batch_size, # Trains with this batch size. ) # For validation the order doesn't matter, so we'll just read them sequentially. validation_dataloader = DataLoader( val_dataset, # The validation samples. sampler=SequentialSampler(val_dataset), # Pull out batches sequentially. batch_size=batch_size, # Evaluate with this batch size. ) optimizer = AdamW( MODEL.parameters(), lr=2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5 eps=1e-8, # args.adam_epsilon - default is 1e-8. ) total_steps = len(train_dataloader) * EPOCHS # Create the learning rate scheduler. scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=0, # Default value in run_glue.py num_training_steps=total_steps, ) import numpy as np # Function to calculate the accuracy of our predictions vs labels def flat_accuracy(preds, labels): pred_flat = np.argmax(preds, axis=1).flatten() labels_flat = labels.flatten() return np.sum(pred_flat == labels_flat) / len(labels_flat) import time import datetime def format_time(elapsed): """ Takes a time in seconds and returns a string hh:mm:ss """ # Round to the nearest second. 
elapsed_rounded = int(round((elapsed))) # Format as hh:mm:ss return str(datetime.timedelta(seconds=elapsed_rounded)) import random import numpy as np # This training code is based on the `run_glue.py` script here: # https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) # We'll store a number of quantities such as training and validation loss, # validation accuracy, and timings. training_stats = [] # Measure the total training time for the whole run. total_t0 = time.time() # For each epoch... for epoch_i in range(0, EPOCHS): # ======================================== # Training # ======================================== # Perform one full pass over the training set. print("") print("======== Epoch {:} / {:} ========".format(epoch_i + 1, EPOCHS)) print("Training...") # Measure how long the training epoch takes. t0 = time.time() # Reset the total loss for this epoch. total_train_loss = 0 # Put the model into training mode. Don't be mislead--the call to # `train` just changes the *mode*, it doesn't *perform* the training. # `dropout` and `batchnorm` layers behave differently during training # vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch) MODEL.train() # For each batch of training data... for step, batch in enumerate(train_dataloader): # Progress update every 40 batches. if step % 40 == 0 and not step == 0: # Calculate elapsed time in minutes. elapsed = format_time(time.time() - t0) # Report progress. print( " Batch {:>5,} of {:>5,}. Elapsed: {:}.".format( step, len(train_dataloader), elapsed ) ) # Unpack this training batch from our dataloader. # # As we unpack the batch, we'll also copy each tensor to the GPU using the # `to` method. # # `batch` contains three pytorch tensors: # [0]: input ids # [1]: attention masks # [2]: labels b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) # Always clear any previously calculated gradients before performing a # backward pass. PyTorch doesn't do this automatically because # accumulating the gradients is "convenient while training RNNs". # (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch) MODEL.zero_grad() # Perform a forward pass (evaluate the model on this training batch). # In PyTorch, calling `model` will in turn call the model's `forward` # function and pass down the arguments. The `forward` function is # documented here: # https://huggingface.co/transformers/model_doc/bert.html#bertforsequenceclassification # The results are returned in a results object, documented here: # https://huggingface.co/transformers/main_classes/output.html#transformers.modeling_outputs.SequenceClassifierOutput # Specifically, we'll get the loss (because we provided labels) and the # "logits"--the model outputs prior to activation. result = MODEL( b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels, return_dict=True, ) loss = result.loss logits = result.logits # Accumulate the training loss over all of the batches so that we can # calculate the average loss at the end. `loss` is a Tensor containing a # single value; the `.item()` function just returns the Python value # from the tensor. total_train_loss += loss.item() # Perform a backward pass to calculate the gradients. loss.backward() # Clip the norm of the gradients to 1.0. 
# This is to help prevent the "exploding gradients" problem. torch.nn.utils.clip_grad_norm_(MODEL.parameters(), 1.0) # Update parameters and take a step using the computed gradient. # The optimizer dictates the "update rule"--how the parameters are # modified based on their gradients, the learning rate, etc. optimizer.step() # Update the learning rate. scheduler.step() # Calculate the average loss over all of the batches. avg_train_loss = total_train_loss / len(train_dataloader) # Measure how long this epoch took. training_time = format_time(time.time() - t0) print("") print(" Average training loss: {0:.2f}".format(avg_train_loss)) print(" Training epcoh took: {:}".format(training_time)) # ======================================== # Validation # ======================================== # After the completion of each training epoch, measure our performance on # our validation set. print("") print("Running Validation...") t0 = time.time() # Put the model in evaluation mode--the dropout layers behave differently # during evaluation. MODEL.eval() # Tracking variables total_eval_accuracy = 0 total_eval_loss = 0 nb_eval_steps = 0 # Evaluate data for one epoch for batch in validation_dataloader: # Unpack this training batch from our dataloader. # # As we unpack the batch, we'll also copy each tensor to the GPU using # the `to` method. # # `batch` contains three pytorch tensors: # [0]: input ids # [1]: attention masks # [2]: labels b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) # Tell pytorch not to bother with constructing the compute graph during # the forward pass, since this is only needed for backprop (training). with torch.no_grad(): # Forward pass, calculate logit predictions. # token_type_ids is the same as the "segment ids", which result = MODEL( b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels, return_dict=True, ) # Get the loss and "logits" output by the model. The "logits" are the # output values prior to applying an activation function like the # softmax. loss = result.loss logits = result.logits # Accumulate the validation loss. total_eval_loss += loss.item() # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to("cpu").numpy() # Calculate the accuracy for this batch of test sentences, and # accumulate it over all batches. total_eval_accuracy += flat_accuracy(logits, label_ids) # Report the final accuracy for this validation run. avg_val_accuracy = total_eval_accuracy / len(validation_dataloader) print(" Accuracy: {0:.2f}".format(avg_val_accuracy)) # Calculate the average loss over all of the batches. avg_val_loss = total_eval_loss / len(validation_dataloader) # Measure how long the validation run took. validation_time = format_time(time.time() - t0) print(" Validation Loss: {0:.2f}".format(avg_val_loss)) print(" Validation took: {:}".format(validation_time)) # Record all statistics from this epoch. training_stats.append( { "epoch": epoch_i + 1, "Training Loss": avg_train_loss, "Valid. Loss": avg_val_loss, "Valid. Accur.": avg_val_accuracy, "Training Time": training_time, "Validation Time": validation_time, } ) print("") print("Training complete!") print("Total training took {:} (h:mm:ss)".format(format_time(time.time() - total_t0))) torch.cuda.empty_cache() import pandas as pd # Display floats with two decimal places. pd.set_option("precision", 2) # Create a DataFrame from our training statistics. 
df_stats = pd.DataFrame(data=training_stats) # Use the 'epoch' as the row index. df_stats = df_stats.set_index("epoch") # A hack to force the column headers to wrap. # df = df.style.set_table_styles([dict(selector="th",props=[('max-width', '70px')])]) # Display the table. df_stats import matplotlib.pyplot as plt import seaborn as sns # Use plot styling from seaborn. sns.set(style="darkgrid") # Increase the plot size and font size. sns.set(font_scale=1.5) plt.rcParams["figure.figsize"] = (12, 6) # Plot the learning curve. plt.plot(df_stats["Training Loss"], "b-o", label="Training") plt.plot(df_stats["Valid. Loss"], "g-o", label="Validation") # Label the plot. plt.title("Training & Validation Loss") plt.xlabel("Epoch") plt.ylabel("Loss") plt.legend() plt.xticks([1, 2, 3, 4]) plt.show()
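# The readability target is a continuous score and the classification head was
# built with num_labels=1, so the argmax-based flat_accuracy reported during
# validation is not very informative; RMSE on the raw logits is the more natural
# check. This is only a sketch, assuming MODEL, device and validation_dataloader
# from the cells above are still in scope.
import numpy as np
import torch

MODEL.eval()
all_preds, all_targets = [], []
with torch.no_grad():
    for batch in validation_dataloader:
        b_input_ids, b_input_mask, b_labels = (t.to(device) for t in batch)
        out = MODEL(b_input_ids, attention_mask=b_input_mask, return_dict=True)
        all_preds.append(out.logits.squeeze(-1).cpu().numpy())
        all_targets.append(b_labels.cpu().numpy())

all_preds = np.concatenate(all_preds)
all_targets = np.concatenate(all_targets)
rmse = float(np.sqrt(np.mean((all_preds - all_targets) ** 2)))
print(f"Validation RMSE: {rmse:.4f}")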
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332481.ipynb
roberta-base
abhishek
[{"Id": 69332481, "ScriptId": 18886486, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5797807, "CreationDate": "07/29/2021 16:44:41", "VersionNumber": 2.0, "Title": "CLRP - kauvinlucas", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 477.0, "LinesInsertedFromPrevious": 350.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 127.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92370477, "KernelVersionId": 69332481, "SourceDatasetVersionId": 1042664}]
[{"Id": 1042664, "DatasetId": 575905, "DatasourceVersionId": 1071866, "CreatorUserId": 5309, "LicenseName": "CC0: Public Domain", "CreationDate": "03/28/2020 21:42:54", "VersionNumber": 1.0, "Title": "roberta-base", "Slug": "roberta-base", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 575905, "CreatorUserId": 5309, "OwnerUserId": 5309.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2795202.0, "CurrentDatasourceVersionId": 2841202.0, "ForumId": 589699, "Type": 2, "CreationDate": "03/28/2020 21:42:54", "LastActivityDate": "03/28/2020", "TotalViews": 13289, "TotalDownloads": 5845, "TotalVotes": 180, "TotalKernels": 326}]
[{"Id": 5309, "UserName": "abhishek", "DisplayName": "Abhishek Thakur", "RegisterDate": "01/12/2011", "PerformanceTier": 4}]
false
0
4,393
0
4,416
4,393
69332686
<jupyter_start><jupyter_text>Brian Tumor Dataset ### Context This dataset consists of scanned brain images of patients diagnosed with a brain tumour. ### Content Separate files for train and test data, with features and labels kept apart. Kaggle dataset identifier: brian-tumor-dataset <jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import tensorflow as tf
from tensorflow import keras
from keras.preprocessing.image import ImageDataGenerator

gen = ImageDataGenerator(
    rescale=1.0 / 255, validation_split=0.2, zoom_range=(0.99, 0.99), dtype=tf.float32
)
train = gen.flow_from_directory(
    "/kaggle/input/brian-tumor-dataset/Brain Tumor Data Set/Brain Tumor Data Set/",
    target_size=(150, 150),
    batch_size=256,
    class_mode="binary",
    color_mode="rgb",
    shuffle=True,
    seed=123,
    subset="training",
)
val = gen.flow_from_directory(
    "/kaggle/input/brian-tumor-dataset/Brain Tumor Data Set/Brain Tumor Data Set/",
    target_size=(150, 150),
    batch_size=8,
    class_mode="binary",
    color_mode="rgb",
    shuffle=True,
    seed=123,
    subset="validation",
)
classes = val.class_indices
# # With grayscale as the color mode, validation loss shows large spikes during training and accuracy is substantially lower than when the same data is loaded in RGB.
classes
import seaborn as sns

# # Class distribution in training dataset
# (estimated from 15 random batches of 256 images, i.e. roughly 3,840 samples, not the full training set)
t = 0
h = 0
for i in range(15):
    a, b = next(train)
    for j in b:
        if j == 1:
            h += 1
        else:
            t += 1
sns.barplot(x=["tumor", "healthy"], y=[t, h])
import matplotlib.pyplot as plt

batch = next(train)
plt.imshow(batch[0][0])
# # Simple CNN
from keras.layers import (
    Conv2D,
    MaxPool2D,
    LeakyReLU,
    BatchNormalization,
    Dropout,
    Dense,
    InputLayer,
    Flatten,
)
from keras.losses import BinaryCrossentropy
from keras.optimizers import Adam

model = keras.Sequential()
model.add(InputLayer(input_shape=(150, 150, 3)))
model.add(Conv2D(filters=32, kernel_size=3, activation="relu", padding="same"))
model.add(MaxPool2D())
model.add(Conv2D(filters=64, kernel_size=3, activation="relu", padding="same"))
model.add(MaxPool2D())
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(rate=0.3))
model.add(Dense(64, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(rate=0.3))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer=Adam(0.001), loss=BinaryCrossentropy(), metrics=["accuracy"])
model.summary()
# # Model plot
tf.keras.utils.plot_model(
    model,
    to_file="model.png",
    show_shapes=True,
    show_layer_names=True,
)
from keras import utils, callbacks

earlystopping = callbacks.EarlyStopping(
    monitor="val_loss", mode="min", patience=5, restore_best_weights=True
)
history = model.fit(
    train, verbose=1, callbacks=[earlystopping], epochs=20, validation_data=val
)
# # Plotting accuracy
plt.plot(history.history["accuracy"], label="accuracy")
plt.plot(history.history["val_accuracy"], label="val_accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.ylim([0, 1])
plt.legend(loc="lower right")
# # Plotting loss
plt.plot(history.history["loss"], label="loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.ylim([0, 1])
plt.legend(loc="lower right")
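# The class-distribution plot above is estimated from 15 random batches; the
# generator already stores the exact label of every training image, so the true
# counts can be read off directly. A small sketch, assuming the `train` iterator
# and the `classes` dict (val.class_indices) defined earlier in this notebook.
import numpy as np
import seaborn as sns

counts = np.bincount(train.classes, minlength=len(classes))
names = sorted(classes, key=classes.get)  # class names ordered by their index
for name, count in zip(names, counts):
    print(f"{name}: {count} images")
sns.barplot(x=names, y=counts)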
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332686.ipynb
brian-tumor-dataset
preetviradiya
[{"Id": 69332686, "ScriptId": 18926402, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7874654, "CreationDate": "07/29/2021 16:48:13", "VersionNumber": 1.0, "Title": "Brain tumor classification with simple CNN", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 134.0, "LinesInsertedFromPrevious": 134.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 51}]
[{"Id": 92371047, "KernelVersionId": 69332686, "SourceDatasetVersionId": 2236708}]
[{"Id": 2236708, "DatasetId": 1343913, "DatasourceVersionId": 2278530, "CreatorUserId": 5456766, "LicenseName": "GPL 2", "CreationDate": "05/16/2021 10:20:25", "VersionNumber": 1.0, "Title": "Brian Tumor Dataset", "Slug": "brian-tumor-dataset", "Subtitle": "X-Ray images of Brain", "Description": "### Context\n\nThis dataset consists of the scanned images of brain of patient diagnosed of brain tumour.\n\n### Content\nSeparated files for train and test data with separating features and labels\n\n### Acknowledgements\nWe wouldn't be here without the help of others. If you owe any attributions or thanks, include them here along with any citations of past research.\n\n### Inspiration\nYour data will be in front of the world's largest data science community. What questions do you want to see answered?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1343913, "CreatorUserId": 5456766, "OwnerUserId": 5456766.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2236708.0, "CurrentDatasourceVersionId": 2278530.0, "ForumId": 1362909, "Type": 2, "CreationDate": "05/16/2021 10:20:25", "LastActivityDate": "05/16/2021", "TotalViews": 42814, "TotalDownloads": 5355, "TotalVotes": 87, "TotalKernels": 38}]
[{"Id": 5456766, "UserName": "preetviradiya", "DisplayName": "Preet Viradiya", "RegisterDate": "07/12/2020", "PerformanceTier": 2}]
false
0
1,171
51
1,242
1,171
69332614
<jupyter_start><jupyter_text>Bike Share Case Study Following are the data sets offered for my Google Analytics certificate final project Kaggle dataset identifier: bike-share-case-study <jupyter_script># ### In this session we are going to:
# 1. Clean the data
# 2. Look back at the business task
# 3. Form questions based on the data at hand
# 4. Do visualizations and form conclusions
# (Note: To spare my poor laptop, forgive me for not presenting every result of my code. I will run the necessary code for easier understanding. You are welcome to use the dataset offered and run the code yourself. Any comments and suggestions are also welcome!)
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import glob

all_files = glob.glob(os.path.join("../input/bike-share-case-study", "*.csv"))
df = pd.concat((pd.read_csv(f) for f in all_files))
df.head()
# ---
# ### 1. Data cleansing
# First we drop rows with empty cells
df.dropna(inplace=True)
# Then we separate date from time
df[["started_date", "started_time"]] = df["started_at"].str.split(" ", n=1, expand=True)
df[["end_date", "end_time"]] = df["ended_at"].str.split(" ", n=1, expand=True)
df = df.drop(["started_at", "ended_at"], axis=1)
# Next, since we are not using the location data, we drop it to save memory
df = df.drop(["start_lat", "start_lng", "end_lat", "end_lng"], axis=1)
# Going further, we found rows whose ride_length is just a run of '#' characters
# (Excel overflow markers); we drop any such row instead of matching the exact
# strings, then separate ride_length into hour and minute columns
df = df[~df.ride_length.str.startswith("#")]
df[["ridelength_hour", "ridelength_minute"]] = df["ride_length"].str.split(
    ":", n=1, expand=True
)
df = df.drop(["ride_length"], axis=1)
# To calculate the mean ride length, we need to convert these columns to integers
# (the line below only displays the dtypes; the actual conversion is done via the lists below)
df.astype({"ridelength_hour": "int64"}).dtypes
ridelength_hour_list = df.ridelength_hour.tolist()
hour_list = [int(s) for s in ridelength_hour_list]
hour_to_minute_list = [x * 60 for x in hour_list]
df["hour_to_minute"] = hour_to_minute_list
ridelength_minute_list = df.ridelength_minute.tolist()
minute_list = [int(s) for s in ridelength_minute_list]
df["minute_to_int"] = minute_list
df = df.drop(["ridelength_hour", "ridelength_minute"], axis=1)
sum_list = []
for x, y in zip(minute_list, hour_to_minute_list):
    sum_list.append(x + y)
# rides of an hour or longer (computed as a quick check; not used further)
filtered = []
for x in sum_list:
    if x >= 60:
        filtered.append(x)
df["ridelength_in_minutes"] = sum_list
df = df.drop(["hour_to_minute", "minute_to_int"], axis=1)
# #### Now our dataset is clean and tidy; it's ready for analysis!
# ----
# ### 2. Basic calculations and summary
df.to_csv("df_clean.csv", index=False)
df_cleaned = pd.read_csv("../input/cleaned-data/df_clean.csv")
# Here I'm just exporting the cleaned file and importing it back so that I don't need to rerun every cell each time I reopen the notebook
df_cleaned.head()
# ---
# #### Before the calculations start, here's a quick look back at our business task
# Three questions will guide the future marketing program for the bike-sharing company:
How do annual members and casual riders use Cyclistic bikes differently? # 2. Why would casual riders buy Cyclistic annual memberships? # 3. How can Cyclistic use digital media to influence casual riders to become members? # We are responsible for the first question: How do annual members and casual riders use Cyclistic bikes differently? And we are going to answer it by dividing it into the **following 3 questions**: # 1. How do ride_length differ from casual and member riders? Why is there a difference?(Can't be done, leave it for now) # 2. When do each type ride mostly? Weektime? Weekend? Why is there a difference? # 3. Who ride electrical bike more? Why? # ----- # #### First : How do ride_length differs? df_casual = df_cleaned.loc[df_cleaned["member_casual"] == "casual"] df_member = df_cleaned.loc[df_cleaned["member_casual"] == "member"] df_member.describe() df_casual.describe() # #### Answer : # Casual riders have average ride length of about 34 minutes while member have an average of only 14 minutes. # -------- # #### Second: When do each type ride? df_member.groupby(by=["day_of_week"]).sum() df_casual.groupby(by=["day_of_week"]).sum() casual_count = [12833150, 6314262, 5650729, 5762943, 5755590, 8193701, 14747435] member_count = [4703186, 4145138, 4445319, 4767769, 4443507, 4731885, 5352431] day_of_week = [1, 2, 3, 4, 5, 6, 7] import matplotlib.pyplot as plt from matplotlib.pyplot import figure figure(figsize=(12, 6), dpi=80) plt.bar(day_of_week, casual_count) plt.bar(day_of_week, member_count) plt.xlabel("Day of Week") plt.ylabel("Ride count") plt.title("Ride pattern of members and casual riders") plt.legend(["Casual riders", "Members"]) plt.show() # #### Answer : # Member rides are evenly distributed during the week, while casual riders ride more often on weekends. # -------------- # #### Last but not least: What type of bikes does each group ride more? df_casual.groupby(by=["rideable_type", "day_of_week"]).sum() df_member.groupby(by=["rideable_type", "day_of_week"]).sum() # We want to graph the pattern of rideable_type to ridelength_in_minutes with week_day as hue, so we are going to use seaborn instead of matplotlib member_casual_sum = pd.read_csv("../input/member-casual-sum/member_casual_sum.csv") # The cell above is the sum of both casual riders and members in 1 csv file member_casual_sum.head() import seaborn as sns plot = sns.catplot( x="day_of_week", y="ridelength_in_minutes", hue="rideable_type", col="member_casual", data=member_casual_sum, kind="bar", height=4, aspect=1.5, ) plot.fig.subplots_adjust(top=0.8) plot.fig.suptitle("What bikes do they ride?")
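# A possible simplification of the ride-length step above, shown as a hedged, self-contained sketch
# on toy data (the "started_at"/"ended_at" column names come from the split step earlier in this
# notebook; the timestamps below are made up, not real Divvy rows):
import pandas as pd

rides = pd.DataFrame(
    {
        "started_at": ["2021-01-02 08:15:00", "2021-01-02 09:00:00"],
        "ended_at": ["2021-01-02 08:49:00", "2021-01-02 10:05:00"],
    }
)

# Parsing the raw timestamps and subtracting them gives the duration directly, avoiding the string
# splits and per-row Python loops; errors="coerce" turns any malformed timestamp into NaT instead
# of raising.
started = pd.to_datetime(rides["started_at"], errors="coerce")
ended = pd.to_datetime(rides["ended_at"], errors="coerce")
rides["ridelength_in_minutes"] = (ended - started).dt.total_seconds() / 60
print(rides)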
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/332/69332614.ipynb
bike-share-case-study
lalilata
[{"Id": 69332614, "ScriptId": 18714440, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5471465, "CreationDate": "07/29/2021 16:46:56", "VersionNumber": 8.0, "Title": "Google data analytic final project: Cleaning", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 190.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 186.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92370754, "KernelVersionId": 69332614, "SourceDatasetVersionId": 2444249}]
[{"Id": 2444249, "DatasetId": 1479062, "DatasourceVersionId": 2486547, "CreatorUserId": 5471465, "LicenseName": "Unknown", "CreationDate": "07/20/2021 09:05:54", "VersionNumber": 1.0, "Title": "Bike Share Case Study", "Slug": "bike-share-case-study", "Subtitle": "Data from a fictional bike sharing company", "Description": "Following are the data set offered for my Google analytic certificate final project", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1479062, "CreatorUserId": 5471465, "OwnerUserId": 5471465.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2444249.0, "CurrentDatasourceVersionId": 2486547.0, "ForumId": 1498726, "Type": 2, "CreationDate": "07/20/2021 09:05:54", "LastActivityDate": "07/20/2021", "TotalViews": 1115, "TotalDownloads": 10, "TotalVotes": 1, "TotalKernels": 1}]
[{"Id": 5471465, "UserName": "lalilata", "DisplayName": "Sunsihlu", "RegisterDate": "07/15/2020", "PerformanceTier": 0}]
# ### In this session we are going to: # 1. Clean the data # 2. Bussiness task look back # 2. Form questions base on the data at hand # 3. Do visaulization and form conclusion # (Note: To save my poor laptop, forgive me not to present you with every results of my code. I will run the neccesary codes for easier understanding. You are welcomed to use the dataset offered and run the codes for yourself. Any comment and suggestion are also welcomed!) import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os import glob all_files = glob.glob(os.path.join("../input/bike-share-case-study", "*.csv")) df = pd.concat((pd.read_csv(f) for f in all_files)) df.head() # --- # ### 1. Data cleansing # First we drop rows with empty cell df.dropna(inplace=True) # Then we seperate date from time df[["started_date", "started_time"]] = df["started_at"].str.split(" ", 1, expand=True) df[["end_date", "end_time"]] = df["ended_at"].str.split(" ", 1, expand=True) df = df.drop(["started_at", "ended_at"], axis=1) # Next, since we are not using the location data, we are dropping it to save memory df = df.drop(["start_lat", "start_lng", "end_lat", "end_lng"], axis=1) # Going further, we found that there are weird '###' in our ride_length column, let's clean it, and seperate them into hour and minute column df = df[ df.ride_length != "#######################################################################################################################################################################################################" ] df = df[ df.ride_length != "###############################################################################################################################################################################################################################################################" ] df[["ridelength_hour", "ridelength_minute"]] = df["ride_length"].str.split( ":", 1, expand=True ) df = df.drop(["ride_length"], axis=1) # To calculate mean of the ride_length, we need to convert it into int data type df.astype({"ridelength_hour": "int64"}).dtypes ridelength_hour_list = df.ridelength_hour.tolist() hour_list = [int(s) for s in ridelength_hour_list] hour_to_minute_list = [x * 60 for x in hour_list] df["hour_to_minute"] = hour_to_minute_list ridelength_minute_list = df.ridelength_minute.tolist() minute_list = [int(s) for s in ridelength_minute_list] df["minute_to_int"] = minute_list df = df.drop(["ridelength_hour", "ridelength_minute"], axis=1) sum_list = [] for x, y in zip(minute_list, hour_to_minute_list): sum_list.append(x + y) filtered = [] for x in sum_list: if x >= 60: filtered.append(x) df["ridelength_in_minutes"] = sum_list df = df.drop(["hour_to_minute", "minute_to_int"], axis=1) # #### Now our dataset are clean and tidy, it's now ready for analysis! # ---- # ### 2. Basis calculation and sum up df.to_csv("df_clean.csv", index=False) df_cleaned = pd.read_csv("../input/cleaned-data/df_clean.csv") # Here I'm just exporting the cleaned file then import it back so that I don't need to run every cells again everytime I reopen it df_cleaned.head() # --- # #### Before the calculation started, here's a quick look to our "Bussiness task" # Three questions will guide the future marketing program for the sharing bike company: # 1. How do annual members and casual riders use Cyclistic bikes differently? # 2. Why would casual riders buy Cyclistic annual memberships? # 3. 
How can Cyclistic use digital media to influence casual riders to become members? # We are responsible for the first question: How do annual members and casual riders use Cyclistic bikes differently? And we are going to answer it by dividing it into the **following 3 questions**: # 1. How do ride_length differ from casual and member riders? Why is there a difference?(Can't be done, leave it for now) # 2. When do each type ride mostly? Weektime? Weekend? Why is there a difference? # 3. Who ride electrical bike more? Why? # ----- # #### First : How do ride_length differs? df_casual = df_cleaned.loc[df_cleaned["member_casual"] == "casual"] df_member = df_cleaned.loc[df_cleaned["member_casual"] == "member"] df_member.describe() df_casual.describe() # #### Answer : # Casual riders have average ride length of about 34 minutes while member have an average of only 14 minutes. # -------- # #### Second: When do each type ride? df_member.groupby(by=["day_of_week"]).sum() df_casual.groupby(by=["day_of_week"]).sum() casual_count = [12833150, 6314262, 5650729, 5762943, 5755590, 8193701, 14747435] member_count = [4703186, 4145138, 4445319, 4767769, 4443507, 4731885, 5352431] day_of_week = [1, 2, 3, 4, 5, 6, 7] import matplotlib.pyplot as plt from matplotlib.pyplot import figure figure(figsize=(12, 6), dpi=80) plt.bar(day_of_week, casual_count) plt.bar(day_of_week, member_count) plt.xlabel("Day of Week") plt.ylabel("Ride count") plt.title("Ride pattern of members and casual riders") plt.legend(["Casual riders", "Members"]) plt.show() # #### Answer : # Member rides are evenly distributed during the week, while casual riders ride more often on weekends. # -------------- # #### Last but not least: What type of bikes does each group ride more? df_casual.groupby(by=["rideable_type", "day_of_week"]).sum() df_member.groupby(by=["rideable_type", "day_of_week"]).sum() # We want to graph the pattern of rideable_type to ridelength_in_minutes with week_day as hue, so we are going to use seaborn instead of matplotlib member_casual_sum = pd.read_csv("../input/member-casual-sum/member_casual_sum.csv") # The cell above is the sum of both casual riders and members in 1 csv file member_casual_sum.head() import seaborn as sns plot = sns.catplot( x="day_of_week", y="ridelength_in_minutes", hue="rideable_type", col="member_casual", data=member_casual_sum, kind="bar", height=4, aspect=1.5, ) plot.fig.subplots_adjust(top=0.8) plot.fig.suptitle("What bikes do they ride?")
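# Hedged sketch (toy data, not the real trip files): if ride counts per weekday are what the bar
# chart should show, a groupby over df_cleaned computes them directly, instead of copying the
# hard-coded casual_count/member_count lists by hand from the groupby output above.
import pandas as pd
import matplotlib.pyplot as plt

df_cleaned = pd.DataFrame(
    {
        "member_casual": ["casual", "member", "casual", "member", "casual"],
        "day_of_week": [1, 1, 7, 3, 7],
    }
)

# One cell per (weekday, rider type) holding the number of rides in that cell.
counts = (
    df_cleaned.groupby(["day_of_week", "member_casual"]).size().unstack(fill_value=0)
)
counts.plot(kind="bar", figsize=(12, 6))
plt.xlabel("Day of Week")
plt.ylabel("Ride count")
plt.title("Ride pattern of members and casual riders")
plt.show()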
false
2
1,928
0
1,969
1,928
69345358
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from ml_metrics import rmse from sklearn.metrics import mean_squared_error # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session train_df = pd.read_csv("/kaggle/input/neolen-house-price-prediction/train.csv") test_df = pd.read_csv("/kaggle/input/neolen-house-price-prediction/test.csv") train_df.head() # ## Data overview train_df.info() train_df.describe() train_df.isnull().sum() # ## remove columns with many null values # train_df.dropna(axis=1, thresh=200, inplace=True) # train_df.info() train_df.SalePrice.hist(bins=50, figsize=(12, 4)) plt.title("houses prices") plt.show() # ### **House prices range from 100k to 200k** plt.scatter(train_df["LotArea"], train_df["SalePrice"]) plt.show() sns.boxplot(x=train_df["LotArea"]) ##there is outliers so we use median to fill nans train_df.YearBuilt.hist(bins=14, rwidth=0.9, figsize=(12, 4)) plt.title("When were the houses built?") plt.show() # # Data processing # train_df.dropna(axis=0, subset=['SalePrice'], inplace=True) # y = train_df.SalePrice # train_df.drop(['SalePrice'], axis=1, inplace=True) # y numerical_columns = [] categorical_columns = [] low_cardinality_catrgorical_columns = [] for col in train_df.columns: if train_df[col].dtype in ["int64", "float64"]: numerical_columns.append(col) elif train_df[col].dtype == "object": categorical_columns.append(col) if train_df[col].nunique() < 10: low_cardinality_catrgorical_columns.append(col) train_df["Alley"] train_df["Alley"].fillna("NoAlley", inplace=True) test_df["Alley"].fillna("NoAlley", inplace=True) print(train_df["Alley"].isnull().sum(), test_df["Alley"].isnull().sum()) train_df["MasVnrType"].fillna("None", inplace=True) test_df["MasVnrType"].fillna("None", inplace=True) print(train_df["MasVnrType"].isnull().sum(), test_df["MasVnrType"].isnull().sum()) train_df["MasVnrArea"].fillna(train_df["MasVnrArea"].median(), inplace=True) test_df["MasVnrArea"].fillna(test_df["MasVnrArea"].median(), inplace=True) print(train_df["MasVnrArea"].isnull().sum(), test_df["MasVnrArea"].isnull().sum()) train_df["BsmtQual"].fillna("NoBas", inplace=True) test_df["BsmtQual"].fillna("NoBas", inplace=True) print(train_df["BsmtQual"].isnull().sum(), test_df["BsmtQual"].isnull().sum()) train_df["BsmtCond"].fillna("NoBas", inplace=True) test_df["BsmtCond"].fillna("NoBas", inplace=True) print(train_df["BsmtCond"].isnull().sum(), test_df["BsmtCond"].isnull().sum()) train_df["BsmtExposure"].fillna("NoBas", inplace=True) test_df["BsmtExposure"].fillna("NoBas", inplace=True) print(train_df["BsmtExposure"].isnull().sum(), test_df["BsmtExposure"].isnull().sum()) train_df["BsmtFinType1"].fillna("NoBas", inplace=True) test_df["BsmtFinType1"].fillna("NoBas", inplace=True) print(train_df["BsmtFinType1"].isnull().sum(), test_df["BsmtFinType1"].isnull().sum()) 
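# Hedged sketch: the repeated "missing means the feature is absent" fills above and below could be
# driven by one mapping and a loop. Toy frame and a shortened mapping only; the real keys would
# mirror the Bsmt*/Garage*/Pool*/Fence columns used in this notebook.
import numpy as np
import pandas as pd

toy = pd.DataFrame({"BsmtQual": ["Gd", np.nan], "GarageType": [np.nan, "Attchd"]})
absent_fill = {"BsmtQual": "NoBas", "GarageType": "NoGarage"}
for col, token in absent_fill.items():
    toy[col] = toy[col].fillna(token)
print(toy)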
train_df["BsmtFinType2"].fillna("NoBas", inplace=True) test_df["BsmtFinType2"].fillna("NoBas", inplace=True) print(train_df["BsmtFinType2"].isnull().sum(), test_df["BsmtFinType2"].isnull().sum()) train_df["FireplaceQu"].fillna("NoFP", inplace=True) test_df["FireplaceQu"].fillna("NoFP", inplace=True) print(train_df["FireplaceQu"].isnull().sum(), test_df["FireplaceQu"].isnull().sum()) train_df["GarageType"].fillna("NoGarage", inplace=True) test_df["GarageType"].fillna("NoGarage", inplace=True) train_df["GarageFinish"].fillna("NoGarage", inplace=True) test_df["GarageFinish"].fillna("NoGarage", inplace=True) train_df["GarageQual"].fillna("NoGarage", inplace=True) test_df["GarageQual"].fillna("NoGarage", inplace=True) train_df["GarageCond"].fillna("NoGarage", inplace=True) test_df["GarageCond"].fillna("NoGarage", inplace=True) train_df["GarageYrBlt"].fillna(0, inplace=True) test_df["GarageYrBlt"].fillna(0, inplace=True) train_df["PoolQC"].fillna("NoPool", inplace=True) test_df["PoolQC"].fillna("NoPool", inplace=True) print(train_df["PoolQC"].isnull().sum(), test_df["PoolQC"].isnull().sum()) train_df["Fence"].fillna("NoFence", inplace=True) test_df["Fence"].fillna("NoFence", inplace=True) print(train_df["Fence"].isnull().sum(), test_df["Fence"].isnull().sum()) train_df["LotFrontage"].fillna(train_df["LotFrontage"].median(), inplace=True) test_df["LotFrontage"].fillna(test_df["LotFrontage"].median(), inplace=True) print(train_df["LotFrontage"].isnull().sum(), test_df["LotFrontage"].isnull().sum()) df_train_clean = train_df.drop("MiscFeature", axis=1) df_test_clean = test_df.drop("MiscFeature", axis=1) df_train_clean.isnull().sum().sum() df_train_clean.info() df_train_clean.duplicated().sum() df_test_clean.isnull().sum().sum() df_test_clean["Electrical"].fillna("SBrkr", inplace=True) df_test_clean["Electrical"].fillna("SBrkr", inplace=True) df_test_clean.isnull().sum().sum() df_test_clean.duplicated().sum() df_train_clean.info() categorical_columns2 = [ "MSZoning", "Street", "Alley", "LotShape", "LandContour", "Utilities", "LotConfig", "LandSlope", "Neighborhood", "Condition1", "Condition2", "BldgType", "HouseStyle", "RoofStyle", "RoofMatl", "Exterior1st", "Exterior2nd", "MasVnrType", "ExterQual", "ExterCond", "Foundation", "BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2", "Heating", "HeatingQC", "CentralAir", "Electrical", "KitchenQual", "Functional", "FireplaceQu", "GarageType", "GarageFinish", "GarageQual", "GarageCond", "PavedDrive", "PoolQC", "Fence", "SaleType", "SaleCondition", ] for cat in categorical_columns2: df_train_clean[cat] = pd.factorize(df_train_clean[cat])[0].reshape(-1, 1) df_test_clean[cat] = pd.factorize(df_test_clean[cat])[0].reshape(-1, 1) df_train = df_train_clean.copy() df_test = df_test_clean.copy() df_train.shape df_test.shape Y_train = df_train["SalePrice"] X_train = df_train.drop("SalePrice", axis=1) # split off a validation set from sklearn.model_selection import train_test_split X_train_0, X_valid_0, Y_train_0, Y_valid_0 = train_test_split( X_train, Y_train, train_size=0.8, test_size=0.2, random_state=0 ) LR_model_1 = LinearRegression() LR_model_1.fit(X_train_0, Y_train_0) Y_pred_App1 = LR_model_1.predict(X_valid_0) r_sq_1 = LR_model_1.score(X_valid_0, Y_valid_0) print("Coefficient of Determination:", r_sq_1) print("RMSE (Approach 1):") print(np.sqrt(mean_squared_error(Y_pred_App1, Y_valid_0))) Y_valid_log = pd.DataFrame(np.log(Y_valid_0)) Y_pred_log = pd.DataFrame(np.log(Y_pred_App1)) Y_log = pd.concat([Y_valid_log, Y_pred_log], axis=1) Y_log.columns = ["Valid", "Pred"] Y_log.dropna(inplace=True) 
print(np.sqrt(mean_squared_error(Y_log.Pred, Y_log.Valid))) df_test_intercept = df_test.copy() df_test_intercept["intercept"] = 1 df_test_intercept.shape X_valid_0.shape output = pd.DataFrame({"Id": X_valid_0.index, "SalePrice": Y_pred_App1}) output.to_csv("submission.csv", index=False)
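# Hedged sketch of the competition-style log-RMSE on made-up numbers: clipping the linear-regression
# output at a small positive floor before taking logs avoids the NaN/-inf values that the dropna()
# step above has to remove (np.log of a non-positive prediction is not finite). The values below are
# illustrative only, not taken from the dataset.
import numpy as np
from sklearn.metrics import mean_squared_error

y_true = np.array([120000.0, 180000.0, 95000.0])
y_pred = np.array([115000.0, 190000.0, -5000.0])

y_pred_clipped = np.clip(y_pred, 1.0, None)
rmsle = np.sqrt(mean_squared_error(np.log(y_true), np.log(y_pred_clipped)))
print(f"log-RMSE: {rmsle:.4f}")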
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/345/69345358.ipynb
null
null
[{"Id": 69345358, "ScriptId": 18889547, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7980976, "CreationDate": "07/29/2021 21:01:54", "VersionNumber": 1.0, "Title": "notebook6fe297e341", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 208.0, "LinesInsertedFromPrevious": 208.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from ml_metrics import rmse from sklearn.metrics import mean_squared_error # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session train_df = pd.read_csv("/kaggle/input/neolen-house-price-prediction/train.csv") test_df = pd.read_csv("/kaggle/input/neolen-house-price-prediction/test.csv") train_df.head() # ## Data overview train_df.info() train_df.describe() train_df.isnull().sum() # ## remove columns with many null values # train_df.dropna(axis=1, thresh=200, inplace=True) # train_df.info() train_df.SalePrice.hist(bins=50, figsize=(12, 4)) plt.title("houses prices") plt.show() # ### **House prices range from 100k to 200k** plt.scatter(train_df["LotArea"], train_df["SalePrice"]) plt.show() sns.boxplot(x=train_df["LotArea"]) ##there is outliers so we use median to fill nans train_df.YearBuilt.hist(bins=14, rwidth=0.9, figsize=(12, 4)) plt.title("When were the houses built?") plt.show() # # Data processing # train_df.dropna(axis=0, subset=['SalePrice'], inplace=True) # y = train_df.SalePrice # train_df.drop(['SalePrice'], axis=1, inplace=True) # y numerical_columns = [] categorical_columns = [] low_cardinality_catrgorical_columns = [] for col in train_df.columns: if train_df[col].dtype in ["int64", "float64"]: numerical_columns.append(col) elif train_df[col].dtype == "object": categorical_columns.append(col) if train_df[col].nunique() < 10: low_cardinality_catrgorical_columns.append(col) train_df["Alley"] train_df["Alley"].fillna("NoAlley", inplace=True) test_df["Alley"].fillna("NoAlley", inplace=True) print(train_df["Alley"].isnull().sum(), test_df["Alley"].isnull().sum()) train_df["MasVnrType"].fillna("None", inplace=True) test_df["MasVnrType"].fillna("None", inplace=True) print(train_df["MasVnrType"].isnull().sum(), test_df["MasVnrType"].isnull().sum()) train_df["MasVnrArea"].fillna(train_df["MasVnrArea"].median(), inplace=True) test_df["MasVnrArea"].fillna(test_df["MasVnrArea"].median(), inplace=True) print(train_df["MasVnrArea"].isnull().sum(), test_df["MasVnrArea"].isnull().sum()) train_df["BsmtQual"].fillna("NoBas", inplace=True) test_df["BsmtQual"].fillna("NoBas", inplace=True) print(train_df["BsmtQual"].isnull().sum(), test_df["BsmtQual"].isnull().sum()) train_df["BsmtCond"].fillna("NoBas", inplace=True) test_df["BsmtCond"].fillna("NoBas", inplace=True) print(train_df["BsmtCond"].isnull().sum(), test_df["BsmtCond"].isnull().sum()) train_df["BsmtExposure"].fillna("NoBas", inplace=True) test_df["BsmtExposure"].fillna("NoBas", inplace=True) print(train_df["BsmtExposure"].isnull().sum(), test_df["BsmtExposure"].isnull().sum()) train_df["BsmtFinType1"].fillna("NoBas", inplace=True) test_df["BsmtFinType1"].fillna("NoBas", inplace=True) print(train_df["BsmtFinType1"].isnull().sum(), test_df["BsmtFinType1"].isnull().sum()) 
train_df["BsmtFinType2"].fillna("NoBas", inplace=True) test_df["BsmtFinType2"].fillna("NoBas", inplace=True) print(train_df["BsmtFinType2"].isnull().sum(), test_df["BsmtFinType2"].isnull().sum()) train_df["FireplaceQu"].fillna("NoFP", inplace=True) test_df["FireplaceQu"].fillna("NoFP", inplace=True) print(train_df["FireplaceQu"].isnull().sum(), test_df["FireplaceQu"].isnull().sum()) train_df["GarageType"].fillna("NoGarage", inplace=True) test_df["GarageType"].fillna("NoGarage", inplace=True) train_df["GarageFinish"].fillna("NoGarage", inplace=True) test_df["GarageFinish"].fillna("NoGarage", inplace=True) train_df["GarageQual"].fillna("NoGarage", inplace=True) test_df["GarageQual"].fillna("NoGarage", inplace=True) train_df["GarageCond"].fillna("NoGarage", inplace=True) test_df["GarageCond"].fillna("NoGarage", inplace=True) train_df["GarageYrBlt"].fillna(0, inplace=True) test_df["GarageYrBlt"].fillna(0, inplace=True) train_df["PoolQC"].fillna("NoPool", inplace=True) test_df["PoolQC"].fillna("NoPool", inplace=True) print(train_df["PoolQC"].isnull().sum(), test_df["PoolQC"].isnull().sum()) train_df["Fence"].fillna("NoFence", inplace=True) test_df["Fence"].fillna("NoFence", inplace=True) print(train_df["Fence"].isnull().sum(), test_df["Fence"].isnull().sum()) train_df["LotFrontage"].fillna(train_df["LotFrontage"].median(), inplace=True) test_df["LotFrontage"].fillna(test_df["LotFrontage"].median(), inplace=True) print(train_df["LotFrontage"].isnull().sum(), test_df["LotFrontage"].isnull().sum()) df_train_clean = train_df.drop("MiscFeature", axis=1) df_test_clean = test_df.drop("MiscFeature", axis=1) df_train_clean.isnull().sum().sum() df_train_clean.info() df_train_clean.duplicated().sum() df_test_clean.isnull().sum().sum() df_test_clean["Electrical"].fillna("SBrkr", inplace=True) df_test_clean["Electrical"].fillna("SBrkr", inplace=True) df_test_clean.isnull().sum().sum() df_test_clean.duplicated().sum() df_train_clean.info() categorical_columns2 = [ "MSZoning", "Street", "Alley", "LotShape", "LandContour", "Utilities", "LotConfig", "LandSlope", "Neighborhood", "Condition1", "Condition2", "BldgType", "HouseStyle", "RoofStyle", "RoofMatl", "Exterior1st", "Exterior2nd", "MasVnrType", "ExterQual", "ExterCond", "Foundation", "BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2", "Heating", "HeatingQC", "CentralAir", "Electrical", "KitchenQual", "Functional", "FireplaceQu", "GarageType", "GarageFinish", "GarageQual", "GarageCond", "PavedDrive", "PoolQC", "Fence", "SaleType", "SaleCondition", ] for cat in categorical_columns2: df_train_clean[cat] = pd.factorize(df_train_clean[cat])[0].reshape(-1, 1) df_test_clean[cat] = pd.factorize(df_test_clean[cat])[0].reshape(-1, 1) df_train = df_train_clean.copy() df_test = df_test_clean.copy() df_train.shape df_test.shape Y_train = df_train["SalePrice"] X_train = df_train.drop("SalePrice", axis=1) # split off a validation set from sklearn.model_selection import train_test_split X_train_0, X_valid_0, Y_train_0, Y_valid_0 = train_test_split( X_train, Y_train, train_size=0.8, test_size=0.2, random_state=0 ) LR_model_1 = LinearRegression() LR_model_1.fit(X_train_0, Y_train_0) Y_pred_App1 = LR_model_1.predict(X_valid_0) r_sq_1 = LR_model_1.score(X_valid_0, Y_valid_0) print("Coefficient of Determination:", r_sq_1) print("RMSE (Approach 1):") print(np.sqrt(mean_squared_error(Y_pred_App1, Y_valid_0))) Y_valid_log = pd.DataFrame(np.log(Y_valid_0)) Y_pred_log = pd.DataFrame(np.log(Y_pred_App1)) Y_log = pd.concat([Y_valid_log, Y_pred_log], axis=1) Y_log.columns = ["Valid", "Pred"] Y_log.dropna(inplace=True) 
print(np.sqrt(mean_squared_error(Y_log.Pred, Y_log.Valid))) df_test_intercept = df_test.copy() df_test_intercept["intercept"] = 1 df_test_intercept.shape X_valid_0.shape output = pd.DataFrame({"Id": X_valid_0.index, "SalePrice": Y_pred_App1}) output.to_csv("submission.csv", index=False)
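# Hedged sketch (toy frames, single column): fitting one encoder on the training data and reusing
# it on the test data keeps the integer codes consistent between the two frames; calling
# pd.factorize separately on each frame, as above, can assign the same category different integers
# in train and test.
import pandas as pd
from sklearn.preprocessing import OrdinalEncoder

train = pd.DataFrame({"MSZoning": ["RL", "RM", "RL"]})
test = pd.DataFrame({"MSZoning": ["RM", "C (all)", "RL"]})

enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
train_codes = enc.fit_transform(train[["MSZoning"]])
test_codes = enc.transform(test[["MSZoning"]])  # the unseen "C (all)" becomes -1
print(train_codes.ravel(), test_codes.ravel())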
false
0
2,636
0
2,636
2,636
69345972
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier train_df = pd.read_csv("../input/titanic/train.csv") train_df.head() train_df.tail() train_df.info() train_df.drop(columns="PassengerId").describe() # age has alot of missing values train_df.describe(include=["O"]) # train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False).plot.bar() g = sns.FacetGrid(train_df, col="Survived") g.map(plt.hist, "Age", bins=40) train_df["Age"].hist() train_df[train_df["Age"] <= 1] # most age => 1 survived # replace age < 1 with 1 train_df.loc[train_df["Age"] <= 1, "Age"] = 1 train_df.loc[train_df["Age"].isnull(), :].hist() train_df.boxplot("Age") train_df.loc[train_df["Age"].isnull(), :].describe(include=["O"]) # so most of missing age from class 3 ,parch and sibsp = 0 # so lets fill the missing value based on median of age of these features for males and females male_median_age = train_df.loc[ (train_df["Sex"] == "male") & (train_df["Pclass"] == 3) & (train_df["SibSp"] == 0) & (train_df["Parch"] == 0), :, ].median()["Age"] female_median_age = train_df.loc[ (train_df["Sex"] == "female") & (train_df["Pclass"] == 3) & (train_df["SibSp"] == 0) & (train_df["Parch"] == 0), :, ].median()["Age"] train_df[train_df["Sex"] == "male"] = train_df[train_df["Sex"] == "male"].fillna( value={"Age": male_median_age} ) train_df[train_df["Sex"] == "female"] = train_df[train_df["Sex"] == "female"].fillna( value={"Age": female_median_age} ) train_df.info() train_df.loc[train_df["Cabin"].isnull(), :].describe(include=["O"]) train_df_copy = train_df.copy() train_df_copy["Cabin"] = pd.factorize(train_df_copy["Cabin"])[0].reshape(-1, 1) g = sns.FacetGrid(train_df_copy, col="Survived") g.map(plt.hist, "Cabin", bins=40) # no impact of this column in survived so drop del train_df_copy train_df.drop("Cabin", axis=1, inplace=True) train_df.drop("Ticket", axis=1, inplace=True) train_df["Embarked"].value_counts() train_df[["Embarked", "Survived"]].groupby( ["Embarked"], as_index=False ).mean().sort_values(by="Survived", ascending=False) train_df["Embarked"] = train_df["Embarked"].fillna(train_df["Embarked"].mode()[0]) g = sns.FacetGrid(train_df, col="Survived") g.map(plt.hist, "Embarked", bins=40) train_df.info() train_df["Sex"] = train_df["Sex"].map({"female": 1, "male": 0}).astype(int) train_df["Embarked"] = train_df["Embarked"].map({"C": 3, "Q": 2, "S": 1}).astype(int) train_df.head() g = sns.FacetGrid(train_df, col="Survived") g.map(plt.hist, "Sex", bins=40) # lets extract the title from the Name then drop the name column train_df["Title"] = train_df.Name.str.extract(" ([A-Za-z]+)\.", expand=False) train_df.drop("Name", axis=1, inplace=True) train_df["Title"].value_counts() train_df[["Title", "Survived"]].groupby(["Title"], as_index=False).mean().sort_values( by="Survived", ascending=False ) titles_List = ["Mr", "Miss", "Mrs", "Master"] train_df.loc[~train_df["Title"].isin(titles_List), "Title"] = "Rare" train_df["Title"].value_counts() train_df[["Title", "Survived"]].groupby(["Title"], as_index=False).mean().sort_values( by="Survived", ascending=False ) train_df["Title"] = ( train_df["Title"] .map({"Mrs": 5, "Miss": 4, "Master": 3, "Rare": 2, "Mr": 1}) .astype(int) ) train_df.info() # let combine Sibsp Parch to know if passanger was alone or in family 
train_df.loc[(train_df["SibSp"] == 0) & (train_df["Parch"] == 0), "Alone"] = 1 train_df.fillna(0, inplace=True) train_df.drop(["SibSp", "Parch"], axis=1, inplace=True) train_df.info() train_df["Alone"].value_counts() train_df[["Fare", "Survived"]].groupby(["Fare"], as_index=False).mean().sort_values( by="Survived", ascending=False ) # train_df.drop(['Fare'],axis = 1 ,inplace = True) train_df.info() from sklearn.model_selection import train_test_split train_df, val_df = train_test_split( train_df, test_size=0.2, random_state=42, stratify=train_df["Sex"] ) # Try adding `stratify` here X_train = train_df.drop(columns=["PassengerId", "Survived"]) y_train = train_df["Survived"] X_val = val_df.drop(columns=["PassengerId", "Survived"]) y_val = val_df["Survived"] from sklearn.feature_selection import mutual_info_classif MI_score_iris = mutual_info_classif(X_train, y_train, random_state=0) # Extract feature_names from the dataset feature_names_iris = X_train.columns # Print the name and mutual information score of each feature for feature in zip(feature_names_iris, MI_score_iris): print(feature) from sklearn.ensemble import RandomForestClassifier # Create an instance of the classifier classifier = RandomForestClassifier(n_estimators=100, max_depth=3, random_state=2) # Train the classifier classifier = classifier.fit(X_train, y_train) print( "The accuracy of the classifier on the validation set is ", (classifier.score(X_val, y_val)), ) # Logistic Regression from sklearn.model_selection import GridSearchCV logreg = LogisticRegression(solver="liblinear") grid_values = {"penalty": ["l1", "l2"], "C": np.logspace(-3, 3, 7)} grid_clf_acc = GridSearchCV(logreg, param_grid=grid_values, cv=10) grid_clf_acc grid_clf_acc.fit(X_train, y_train) y_pred_acc = grid_clf_acc.predict(X_val) from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score print("tuned hpyerparameters :(best parameters) ", grid_clf_acc.best_params_) # New Model Evaluation metrics print("Accuracy Score : " + str(accuracy_score(y_val, y_pred_acc))) print("Precision Score : " + str(precision_score(y_val, y_pred_acc))) print("Recall Score : " + str(recall_score(y_val, y_pred_acc))) print("F1 Score : " + str(f1_score(y_val, y_pred_acc))) logreg.fit(X_train, y_train) acc_log = logreg.score(X_val, y_val) acc_log logreg.fit(X_train, y_train) acc_log = logreg.score(X_val, y_val) acc_log from sklearn import svm clf = svm.SVC(kernel="linear", gamma="auto") clf.fit(X_train, y_train) clf.score(X_val, y_val) test_df_origin = pd.read_csv("../input/titanic/test.csv") test_df = test_df_origin.copy() test_df.loc[test_df["Age"] <= 1, "Age"] = 1 test_df[test_df["Sex"] == "male"] = test_df[test_df["Sex"] == "male"].fillna( value={"Age": male_median_age} ) test_df[test_df["Sex"] == "female"] = test_df[test_df["Sex"] == "female"].fillna( value={"Age": female_median_age} ) test_df.drop("Cabin", axis=1, inplace=True) test_df.drop("Ticket", axis=1, inplace=True) test_df["Embarked"] = test_df["Embarked"].fillna(test_df["Embarked"].mode()[0]) test_df["Sex"] = test_df["Sex"].map({"female": 1, "male": 0}).astype(int) test_df["Embarked"] = test_df["Embarked"].map({"C": 3, "Q": 2, "S": 1}).astype(int) test_df["Title"] = test_df.Name.str.extract(" ([A-Za-z]+)\.", expand=False) test_df.drop("Name", axis=1, inplace=True) test_df.loc[~test_df["Title"].isin(titles_List), "Title"] = "Rare" test_df["Title"] = ( test_df["Title"] .map({"Mrs": 5, "Miss": 4, "Master": 3, "Rare": 2, "Mr": 1}) .astype(int) ) test_df.loc[(test_df["SibSp"] == 0) & 
(test_df["Parch"] == 0), "Alone"] = 1 test_df.fillna(0, inplace=True) test_df.drop(["SibSp", "Parch"], axis=1, inplace=True) test_df.drop(["PassengerId"], axis=1, inplace=True) test_df.info() Y_test_pred = logreg.predict(test_df) # Y_test_pred = classifier.predict(test_df) # Y_test_pred = clf.predict(test_df) test_df_origin["Survived"] = Y_test_pred test_df_origin[["PassengerId", "Survived"]].to_csv( "/kaggle/working/submission.csv", index=False )
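# Hedged sketch on synthetic data: GridSearchCV with its default refit=True already refits the best
# model on the full training split, so best_estimator_ can produce the final predictions instead of
# a separate, untuned LogisticRegression. The features below are synthetic stand-ins for the
# engineered Titanic columns.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, train_test_split

X, y = make_classification(n_samples=200, n_features=6, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

grid = GridSearchCV(
    LogisticRegression(solver="liblinear"),
    {"penalty": ["l1", "l2"], "C": np.logspace(-3, 3, 7)},
    cv=5,
)
grid.fit(X_tr, y_tr)
print(grid.best_params_, grid.best_estimator_.score(X_te, y_te))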
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/345/69345972.ipynb
null
null
[{"Id": 69345972, "ScriptId": 18908022, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6537227, "CreationDate": "07/29/2021 21:17:01", "VersionNumber": 10.0, "Title": "Titanic", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 226.0, "LinesInsertedFromPrevious": 25.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 201.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier train_df = pd.read_csv("../input/titanic/train.csv") train_df.head() train_df.tail() train_df.info() train_df.drop(columns="PassengerId").describe() # age has alot of missing values train_df.describe(include=["O"]) # train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False).plot.bar() g = sns.FacetGrid(train_df, col="Survived") g.map(plt.hist, "Age", bins=40) train_df["Age"].hist() train_df[train_df["Age"] <= 1] # most age => 1 survived # replace age < 1 with 1 train_df.loc[train_df["Age"] <= 1, "Age"] = 1 train_df.loc[train_df["Age"].isnull(), :].hist() train_df.boxplot("Age") train_df.loc[train_df["Age"].isnull(), :].describe(include=["O"]) # so most of missing age from class 3 ,parch and sibsp = 0 # so lets fill the missing value based on median of age of these features for males and females male_median_age = train_df.loc[ (train_df["Sex"] == "male") & (train_df["Pclass"] == 3) & (train_df["SibSp"] == 0) & (train_df["Parch"] == 0), :, ].median()["Age"] female_median_age = train_df.loc[ (train_df["Sex"] == "female") & (train_df["Pclass"] == 3) & (train_df["SibSp"] == 0) & (train_df["Parch"] == 0), :, ].median()["Age"] train_df[train_df["Sex"] == "male"] = train_df[train_df["Sex"] == "male"].fillna( value={"Age": male_median_age} ) train_df[train_df["Sex"] == "female"] = train_df[train_df["Sex"] == "female"].fillna( value={"Age": female_median_age} ) train_df.info() train_df.loc[train_df["Cabin"].isnull(), :].describe(include=["O"]) train_df_copy = train_df.copy() train_df_copy["Cabin"] = pd.factorize(train_df_copy["Cabin"])[0].reshape(-1, 1) g = sns.FacetGrid(train_df_copy, col="Survived") g.map(plt.hist, "Cabin", bins=40) # no impact of this column in survived so drop del train_df_copy train_df.drop("Cabin", axis=1, inplace=True) train_df.drop("Ticket", axis=1, inplace=True) train_df["Embarked"].value_counts() train_df[["Embarked", "Survived"]].groupby( ["Embarked"], as_index=False ).mean().sort_values(by="Survived", ascending=False) train_df["Embarked"] = train_df["Embarked"].fillna(train_df["Embarked"].mode()[0]) g = sns.FacetGrid(train_df, col="Survived") g.map(plt.hist, "Embarked", bins=40) train_df.info() train_df["Sex"] = train_df["Sex"].map({"female": 1, "male": 0}).astype(int) train_df["Embarked"] = train_df["Embarked"].map({"C": 3, "Q": 2, "S": 1}).astype(int) train_df.head() g = sns.FacetGrid(train_df, col="Survived") g.map(plt.hist, "Sex", bins=40) # lets extract the title from the Name then drop the name column train_df["Title"] = train_df.Name.str.extract(" ([A-Za-z]+)\.", expand=False) train_df.drop("Name", axis=1, inplace=True) train_df["Title"].value_counts() train_df[["Title", "Survived"]].groupby(["Title"], as_index=False).mean().sort_values( by="Survived", ascending=False ) titles_List = ["Mr", "Miss", "Mrs", "Master"] train_df.loc[~train_df["Title"].isin(titles_List), "Title"] = "Rare" train_df["Title"].value_counts() train_df[["Title", "Survived"]].groupby(["Title"], as_index=False).mean().sort_values( by="Survived", ascending=False ) train_df["Title"] = ( train_df["Title"] .map({"Mrs": 5, "Miss": 4, "Master": 3, "Rare": 2, "Mr": 1}) .astype(int) ) train_df.info() # let combine Sibsp Parch to know if passanger was alone or in family 
train_df.loc[(train_df["SibSp"] == 0) & (train_df["Parch"] == 0), "Alone"] = 1 train_df.fillna(0, inplace=True) train_df.drop(["SibSp", "Parch"], axis=1, inplace=True) train_df.info() train_df["Alone"].value_counts() train_df[["Fare", "Survived"]].groupby(["Fare"], as_index=False).mean().sort_values( by="Survived", ascending=False ) # train_df.drop(['Fare'],axis = 1 ,inplace = True) train_df.info() from sklearn.model_selection import train_test_split train_df, val_df = train_test_split( train_df, test_size=0.2, random_state=42, stratify=train_df["Sex"] ) # Try adding `stratify` here X_train = train_df.drop(columns=["PassengerId", "Survived"]) y_train = train_df["Survived"] X_val = val_df.drop(columns=["PassengerId", "Survived"]) y_val = val_df["Survived"] from sklearn.feature_selection import mutual_info_classif MI_score_iris = mutual_info_classif(X_train, y_train, random_state=0) # Extract feature_names from the dataset feature_names_iris = X_train.columns # Print the name and mutual information score of each feature for feature in zip(feature_names_iris, MI_score_iris): print(feature) from sklearn.ensemble import RandomForestClassifier # Create an instance of the classifier classifier = RandomForestClassifier(n_estimators=100, max_depth=3, random_state=2) # Train the classifier classifier = classifier.fit(X_train, y_train) print( "The accuracy of the classifier on the validation set is ", (classifier.score(X_val, y_val)), ) # Logistic Regression from sklearn.model_selection import GridSearchCV logreg = LogisticRegression(solver="liblinear") grid_values = {"penalty": ["l1", "l2"], "C": np.logspace(-3, 3, 7)} grid_clf_acc = GridSearchCV(logreg, param_grid=grid_values, cv=10) grid_clf_acc grid_clf_acc.fit(X_train, y_train) y_pred_acc = grid_clf_acc.predict(X_val) from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score print("tuned hpyerparameters :(best parameters) ", grid_clf_acc.best_params_) # New Model Evaluation metrics print("Accuracy Score : " + str(accuracy_score(y_val, y_pred_acc))) print("Precision Score : " + str(precision_score(y_val, y_pred_acc))) print("Recall Score : " + str(recall_score(y_val, y_pred_acc))) print("F1 Score : " + str(f1_score(y_val, y_pred_acc))) logreg.fit(X_train, y_train) acc_log = logreg.score(X_val, y_val) acc_log logreg.fit(X_train, y_train) acc_log = logreg.score(X_val, y_val) acc_log from sklearn import svm clf = svm.SVC(kernel="linear", gamma="auto") clf.fit(X_train, y_train) clf.score(X_val, y_val) test_df_origin = pd.read_csv("../input/titanic/test.csv") test_df = test_df_origin.copy() test_df.loc[test_df["Age"] <= 1, "Age"] = 1 test_df[test_df["Sex"] == "male"] = test_df[test_df["Sex"] == "male"].fillna( value={"Age": male_median_age} ) test_df[test_df["Sex"] == "female"] = test_df[test_df["Sex"] == "female"].fillna( value={"Age": female_median_age} ) test_df.drop("Cabin", axis=1, inplace=True) test_df.drop("Ticket", axis=1, inplace=True) test_df["Embarked"] = test_df["Embarked"].fillna(test_df["Embarked"].mode()[0]) test_df["Sex"] = test_df["Sex"].map({"female": 1, "male": 0}).astype(int) test_df["Embarked"] = test_df["Embarked"].map({"C": 3, "Q": 2, "S": 1}).astype(int) test_df["Title"] = test_df.Name.str.extract(" ([A-Za-z]+)\.", expand=False) test_df.drop("Name", axis=1, inplace=True) test_df.loc[~test_df["Title"].isin(titles_List), "Title"] = "Rare" test_df["Title"] = ( test_df["Title"] .map({"Mrs": 5, "Miss": 4, "Master": 3, "Rare": 2, "Mr": 1}) .astype(int) ) test_df.loc[(test_df["SibSp"] == 0) & 
(test_df["Parch"] == 0), "Alone"] = 1 test_df.fillna(0, inplace=True) test_df.drop(["SibSp", "Parch"], axis=1, inplace=True) test_df.drop(["PassengerId"], axis=1, inplace=True) test_df.info() Y_test_pred = logreg.predict(test_df) # Y_test_pred = classifier.predict(test_df) # Y_test_pred = clf.predict(test_df) test_df_origin["Survived"] = Y_test_pred test_df_origin[["PassengerId", "Survived"]].to_csv( "/kaggle/working/submission.csv", index=False )
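# Hedged sketch (toy rows): a groupby().transform("median") imputation is a compact alternative to
# the two hand-built male/female masks used above. Note it is not exactly equivalent: each group's
# median is computed from that group only, whereas the notebook filters on Pclass/SibSp/Parch before
# taking the median and then applies it to every missing row of that sex.
import numpy as np
import pandas as pd

toy = pd.DataFrame(
    {
        "Sex": ["male", "female", "male", "female"],
        "Pclass": [3, 3, 3, 3],
        "Age": [22.0, np.nan, np.nan, 30.0],
    }
)
toy["Age"] = toy["Age"].fillna(
    toy.groupby(["Sex", "Pclass"])["Age"].transform("median")
)
print(toy)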
false
0
2,897
0
2,897
2,897
69345111
<jupyter_start><jupyter_text>2019 Autonomous Vehicle Disengagement Reports ### Context Each company which hold a permit to test self-driving vehicles in California must report disengagements to the state's Department of Motor Vehicles. The department defines a disengagement as "a deactivation of the autonomous mode when a failure of the autonomous technology is detected or when the safe operation of the vehicle requires that the autonomous vehicle test driver disengage the autonomous mode and take immediate manual. control of the vehicle." This dataset of disengagement reports can highlight where the technology is still struggling. ### Content "The data files below contain the disengagements and autonomous miles traveled for permit holders who reported testing on California’s public roads between December 1, 2018 and November 30, 2019. Separate data files contain information on companies who received their permit in 2018 and are reporting testing activity in California for the first time (beyond the normal 12-month cycle)." Each report includes: Manufacturer Permit Number DATE VIN NUMBER VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER(Yes or No) DRIVER PRESENT(Yes or No) DISENGAGEMENT INITIATED BY(AV System, Test Driver, Remote Operator, or Passenger) DISENGAGEMENTLOCATION (Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility) DESCRIPTION OF FACTS CAUSING DISENGAGEMENT Kaggle dataset identifier: 2019-autonomous-vehicle-disengagement-reports <jupyter_code>import pandas as pd df = pd.read_csv('2019-autonomous-vehicle-disengagement-reports/2018-19_AutonomousVehicleDisengagementReports(firsttimefilers).csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 454 entries, 0 to 453 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Manufacturer 454 non-null object 1 Permit Number 454 non-null object 2 DATE 454 non-null object 3 VIN NUMBER 454 non-null object 4 VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER (Yes or No) 454 non-null object 5 DRIVER PRESENT (Yes or No) 454 non-null object 6 DISENGAGEMENT INITIATED BY (AV System, Test Driver, Remote Operator, or Passenger) 454 non-null object 7 DISENGAGEMENT LOCATION (Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility) 454 non-null object 8 DESCRIPTION OF FACTS CAUSING DISENGAGEMENT 454 non-null object 9 Unnamed: 9 24 non-null object 10 Unnamed: 10 1 non-null object dtypes: object(11) memory usage: 39.1+ KB <jupyter_text>Examples: { "Manufacturer": "Ambarella Corp.", "Permit Number": "AVT053", "DATE": "2018-03-14 00:00:00", "VIN NUMBER": "3LN6L5MU7HR609845", "VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)": "No", "DRIVER PRESENT\n(Yes or No)": "Yes", "DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)": "test driver", "DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)": "street", "DESCRIPTION OF FACTS CAUSING DISENGAGEMENT": "Unexpected result from the path planner in the given traffic conditions", "Unnamed: 9": NaN, "Unnamed: 10": NaN } { "Manufacturer": "Ambarella Corp.", "Permit Number": "AVT053", "DATE": "2018-03-14 00:00:00", "VIN NUMBER": "3LN6L5MU7HR609845", "VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)": "No", "DRIVER PRESENT\n(Yes or No)": "Yes", "DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)": "test driver", "DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, 
or Parking Facility)": "street", "DESCRIPTION OF FACTS CAUSING DISENGAGEMENT": "Unexpected result from the radar based perception in the given traffic conditions", "Unnamed: 9": NaN, "Unnamed: 10": NaN } { "Manufacturer": "Ambarella Corp.", "Permit Number": "AVT053", "DATE": "2018-03-14 00:00:00", "VIN NUMBER": "3LN6L5MU7HR609845", "VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)": "No", "DRIVER PRESENT\n(Yes or No)": "Yes", "DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)": "test driver", "DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)": "street", "DESCRIPTION OF FACTS CAUSING DISENGAGEMENT": "Unexpected result from the path planner in the given traffic conditions", "Unnamed: 9": NaN, "Unnamed: 10": NaN } { "Manufacturer": "Ambarella Corp.", "Permit Number": "AVT053", "DATE": "2018-03-14 00:00:00", "VIN NUMBER": "3LN6L5MU7HR609845", "VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)": "No", "DRIVER PRESENT\n(Yes or No)": "Yes", "DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)": "test driver", "DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)": "street", "DESCRIPTION OF FACTS CAUSING DISENGAGEMENT": "Unexpected result from the GPS system in the given traffic conditions", "Unnamed: 9": NaN, "Unnamed: 10": NaN } <jupyter_code>import pandas as pd df = pd.read_csv('2019-autonomous-vehicle-disengagement-reports/2019AutonomousVehicleDisengagementReports.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 8885 entries, 0 to 8884 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Manufacturer 8885 non-null object 1 Permit Number 8885 non-null object 2 DATE 8884 non-null object 3 VIN NUMBER 8884 non-null object 4 VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER (Yes or No) 8884 non-null object 5 DRIVER PRESENT (Yes or No) 8884 non-null object 6 DISENGAGEMENT INITIATED BY (AV System, Test Driver, Remote Operator, or Passenger) 8884 non-null object 7 DISENGAGEMENT LOCATION (Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility) 8884 non-null object 8 DESCRIPTION OF FACTS CAUSING DISENGAGEMENT 8884 non-null object dtypes: object(9) memory usage: 624.9+ KB <jupyter_text>Examples: { "Manufacturer": "AImotive Inc.", "Permit Number": "AVT003", "DATE": "2018-12-06 00:00:00", "VIN NUMBER": "JTDKN3DU5A1092792", "VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)": "No", "DRIVER PRESENT\n(Yes or No)": "Yes", "DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)": "Test Driver", "DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)": "Freeway", "DESCRIPTION OF FACTS CAUSING DISENGAGEMENT": "Lane change maneuver: risk of lane departure, caused by unstable target lane model" } { "Manufacturer": "AImotive Inc.", "Permit Number": "AVT003", "DATE": "2018-12-10 00:00:00", "VIN NUMBER": "JTDKN3DU5A1092792", "VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)": "No", "DRIVER PRESENT\n(Yes or No)": "Yes", "DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)": "Test Driver", "DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)": "Freeway", "DESCRIPTION OF FACTS CAUSING DISENGAGEMENT": "Lane change maneuver: risk of lane departure, caused by unstable target lane model" 
} { "Manufacturer": "AImotive Inc.", "Permit Number": "AVT003", "DATE": "2018-12-10 00:00:00", "VIN NUMBER": "JTDKN3DU5A1092792", "VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)": "No", "DRIVER PRESENT\n(Yes or No)": "Yes", "DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)": "Test Driver", "DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)": "Freeway", "DESCRIPTION OF FACTS CAUSING DISENGAGEMENT": "Lane change maneuver: risk of lane departure, caused by unstable target lane model" } { "Manufacturer": "AImotive Inc.", "Permit Number": "AVT003", "DATE": "2019-04-23 00:00:00", "VIN NUMBER": "JTDKN3DU5A1092792", "VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)": "No", "DRIVER PRESENT\n(Yes or No)": "Yes", "DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)": "Test Driver", "DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)": "Freeway", "DESCRIPTION OF FACTS CAUSING DISENGAGEMENT": "Lane change maneuver: risk of lane departure, caused by overshooting trajectory planning" } <jupyter_script>import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df1 = pd.read_csv( "../input/2019-autonomous-vehicle-disengagement-reports/2018-19_AutonomousVehicleDisengagementReports(firsttimefilers).csv" ) df2 = pd.read_csv( "../input/2019-autonomous-vehicle-disengagement-reports/2019AutonomousVehicleDisengagementReports.csv" ) # Combining Both DataFrames res = [df1, df2] df = pd.concat(res) # Renaming Columns for Legibility df.columns = [ "Manufacturer", "Permit Number", "Date", "VIN number", "Vehicle Is Capable Of Operating Without A Driver", "Driver Present", "Disengagement Initiated By", "Disengagement Location", "Description Of Facts Causing Disengagement", "N/A", "N/A", ] df.head(20) # Cleaning up dataframe to get rid of string issues such as "Test Driver" vs "test driver", etc. 
df["Disengagement Location"] = df["Disengagement Location"].replace( "street (high speed)", "street" ) df["Disengagement Location"] = df["Disengagement Location"].replace( "Downtown street", "street" ) df["Disengagement Location"] = df["Disengagement Location"].replace( "Rural", "rural road" ) df["Disengagement Location"] = df["Disengagement Location"].replace( "Parking Lot", "parking facility" ) df["Disengagement Location"] = df["Disengagement Location"].str.lower() df["Disengagement Location"] = df["Disengagement Location"].replace( "downtown street", "street" ) df["Driver Present"] = df["Driver Present"].str.lower() df["Driver Present"] = df["Driver Present"].replace("nan", "no") df["Vehicle Is Capable Of Operating Without A Driver"] = df[ "Vehicle Is Capable Of Operating Without A Driver" ].str.lower() df["Disengagement Initiated By"] = df["Disengagement Initiated By"].replace( "Safety Driver", "test driver" ) df["Disengagement Initiated By"] = df["Disengagement Initiated By"].replace( "Vehicle Operator", "test driver" ) df["Disengagement Initiated By"] = df["Disengagement Initiated By"].str.lower() df["Date"] = df["Date"].replace("1/30.2019", "1/30/2019") df["Date"] = pd.to_datetime(df["Date"]) df = df.set_index("Date") df["Description Of Facts Causing Disengagement"] = df[ "Description Of Facts Causing Disengagement" ].astype(str) df.head(20) # Count Statistics # Number of each stats: print("Number of reports: " + str(df.shape[0])) print("Number of unique manufacturers: " + str(df["Manufacturer"].nunique())) print("Number of unique vehicles: " + str(df["VIN number"].nunique())) print("Number of unique permits: " + str(df["Permit Number"].nunique())) # Disengagement Frequency Over Time dates = df.groupby("Date")["VIN number"].nunique() print(dates) plt.title("Disengagement Frequency Over Time") dates["2018-01-01":"2019-12-31"].resample("Q").count().plot(marker="D") plt.xlabel("Month", fontsize=18) plt.ylabel("Disengagements", fontsize=18) plt.xticks(fontsize=12) plt.yticks(fontsize=12) # Put markers in # Disengagement Initiated By # Disengagement Initiated By catinitiate = df["Disengagement Initiated By"].unique().tolist() catinitiate.pop() plt.figure(figsize=(25, 6)) plt.title("Disengagement Initiated By", fontsize=18) vcountinitiate = df["Disengagement Initiated By"].value_counts().tolist() colors = ["#66b3ff", "#ff9999", "#99ff99", "#ffcc99"] plt.pie(vcountinitiate, labels=catinitiate, autopct="%0.2f%%", colors=colors) plt.axis("equal") centre_circle = plt.Circle((0, 0), 0.70, fc="white") fig = plt.gcf() fig.gca().add_artist(centre_circle) plt.show() # Disengagement Location # Disengagement Location (bar graph instead of pie as more legible) catlocation = df["Disengagement Location"].unique().tolist() catlocation.pop(-1) colors1 = [ "#ffcc99", "#99ff99", "#66b3ff", "#ff9999", ] catloc = range(len(catlocation)) plt.figure(figsize=(15, 6)) plt.title("Disengagement Location", fontsize=18) plt.xlabel("Location") plt.ylabel("Frequency") vcountlocation = df["Disengagement Location"].value_counts().tolist() plt.bar(catloc, vcountlocation, width=0.5, color=colors) ax = plt.subplot() ax.set_xticks(catloc) ax.set_xticklabels(catlocation, rotation=30) for i, v in enumerate(vcountlocation): ax.text(i - 0.15, v, str(v), color="black") plt.show() # ISSUE: Not able to data filter the downtown street to combine with street for some reason, need help with that. 
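# Hedged sketch addressing the ISSUE note above (toy values only): lower-casing and stripping first,
# then applying a single replacement map, means each variant such as "Downtown street" only has to
# be listed once and the order of the replace calls no longer matters.
import pandas as pd

loc = pd.Series(["Downtown street", "street (high speed)", "STREET", "Parking Lot"])
normalized = loc.str.strip().str.lower().replace(
    {
        "downtown street": "street",
        "street (high speed)": "street",
        "parking lot": "parking facility",
        "rural": "rural road",
    }
)
print(normalized.value_counts())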
# Disengagements by Manufacturer # Number of Disengagements categorized by Manufacturer catmanu = df["Manufacturer"].unique().tolist() plt.figure(figsize=(25, 12)) plt.title("Disengagement by Manufacturer", fontsize=18) catmanunum = range(len(catmanu)) vcountmanu = df["Manufacturer"].value_counts().tolist() plt.bar(catmanunum, vcountmanu, width=0.5, color=["#1E90FF"]) plt.xlabel("Manufacturer", fontsize=15) plt.ylabel("Number of Disengagements", fontsize=15) ax = plt.subplot() ax.set_xticks(catmanunum) ax.set_xticklabels(catmanu, rotation=90) for i, v in enumerate(vcountmanu): ax.text(i - 0.25, v, str(v), color="black") plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.show() # Driver Present During Disengagements # Was Driver Present During the Disengagements catdriver = df["Driver Present"].unique().tolist() catdriver.pop() catdriver.pop() catdriver.append("unknown") vcountdriver = df["Driver Present"].value_counts().tolist() plt.figure(figsize=(12, 7)) plt.title("Driver Present During Disengagements") plt.pie(vcountdriver, labels=catdriver, autopct="%0.2f%%", colors=colors) plt.show() # Vehicle Capable Of Operating Without A Driver # Is Vehicle Capable of Operating Without A Driver catcap = df["Vehicle Is Capable Of Operating Without A Driver"].unique().tolist() catcap.pop() catcap.pop() catcap.append("unknown") colors2 = ["#00BFFF", "#ff9999", "#99ff99", "#ffcc99"] vcountcap = ( df["Vehicle Is Capable Of Operating Without A Driver"].value_counts().tolist() ) plt.figure(figsize=(12, 7)) plt.title("Vehicle Capabilities Of Operating Without A Driver", fontsize=18) plt.pie(vcountcap, labels=catcap, autopct="%0.2f%%", colors=colors) centre_circle = plt.Circle((0, 0), 0.70, fc="white") fig = plt.gcf() fig.gca().add_artist(centre_circle) plt.show() # Disengagement Descriptions descriptions = ( df["Description Of Facts Causing Disengagement"] .str.lower() .str.replace("[^\w\s]", "") .replace("[^\n]", " ") ) totalreports = float(len(df.index)) plt.figure(figsize=(25, 12)) c = dict() for description in descriptions: description = description.split(" ") for word in np.unique(description): if word in c: c[word] += 1 else: c[word] = 1 catdescrip1 = [ "software", "hardware", "sensor", "camera", "lidar", "radar", "map", "gps", ] catdescrip2 = [ "planning", "perception", "tracking", "trajectory", "localization", "control", ] catdescrip3 = ["car", "vehicle", "truck", "pedestrians", "bicyclist"] catdescrip4 = ["light", "construction", "traffic", "intersection", "weather", "debris"] leng1 = range(len(catdescrip1)) leng2 = range(len(catdescrip2)) leng3 = range(len(catdescrip3)) leng4 = range(len(catdescrip4)) software = c["software"] hardware = c["hardware"] sensor = c["sensor"] camera = c["camera"] lidar = c["lidar"] radar = c["radar"] maap = c["map"] gps = c["gps"] planning = c["planning"] + c["planned"] perception = c["perception"] tracking = c["tracking"] trajectory = c["trajectory"] localization = c["localization"] control = c["control"] + c["controller"] car = c["car"] vehicle = c["vehicle"] truck = c["truck"] pedestrians = c["pedestrians"] bicyclist = c["bicyclist"] light = c["green"] + c["light"] construction = c["construction"] traffic = c["traffic"] intersection = c["intersection"] weather = c["weather"] + c["rain"] debris = c["debris"] listvalue1 = [software, hardware, sensor, camera, lidar, radar, maap, gps] listvalue2 = [planning, perception, tracking, trajectory, localization, control] listvalue3 = [car, vehicle, truck, pedestrians, bicyclist] listvalue4 = [light, construction, traffic, 
intersection, weather, debris] plt.title("Descriptions of Disengagements One", fontsize=18) plt.bar(leng1, listvalue1, color=["#1E90FF"]) ax = plt.subplot() ax.set_xticks(leng1) ax.set_xticklabels(catdescrip1, rotation=90, fontsize=15) for i, v in enumerate(listvalue1): ax.text(i - 0.25, v, str(v), color="black", fontsize=15) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.show() plt.figure(figsize=(25, 12)) plt.title("Descriptions of Disengagements Two", fontsize=18) plt.bar(leng2, listvalue2, color=["#1E90FF"]) ax = plt.subplot() ax.set_xticks(leng2) ax.set_xticklabels(catdescrip2, rotation=90, fontsize=15) for i, v in enumerate(listvalue2): ax.text(i - 0.25, v, str(v), color="black", fontsize=15) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.show() plt.figure(figsize=(25, 12)) plt.title("Descriptions of Disengagements Three", fontsize=18) plt.bar(leng3, listvalue3, color=["#1E90FF"]) ax = plt.subplot() ax.set_xticks(leng3) ax.set_xticklabels(catdescrip3, rotation=90, fontsize=15) for i, v in enumerate(listvalue3): ax.text(i - 0.25, v, str(v), color="black", fontsize=15) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.show() plt.figure(figsize=(25, 12)) plt.title("Descriptions of Disengagements Four", fontsize=18) plt.bar(leng4, listvalue4, color=["#1E90FF"]) ax = plt.subplot() ax.set_xticks(leng4) ax.set_xticklabels(catdescrip4, rotation=90, fontsize=15) for i, v in enumerate(listvalue4): ax.text(i - 0.25, v, str(v), color="black", fontsize=15) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.show()
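# The manual dict tally above raises a KeyError if one of the hard-coded keywords never
# appears in any report, and the synonym merging (e.g. planning + planned) is easy to
# lose track of. A sketch of the same per-report count using collections.Counter,
# assuming the `descriptions` Series and the catdescrip lists defined above; missing
# keywords default to zero via .get(word, 0). Synonym merging is omitted for brevity.
from collections import Counter

word_counts = Counter()
for description in descriptions:
    word_counts.update(set(description.split()))  # count each word at most once per report

for group in [catdescrip1, catdescrip2, catdescrip3, catdescrip4]:
    print({word: word_counts.get(word, 0) for word in group})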
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/345/69345111.ipynb
2019-autonomous-vehicle-disengagement-reports
art12400
[{"Id": 69345111, "ScriptId": 18562899, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6895098, "CreationDate": "07/29/2021 20:55:42", "VersionNumber": 18.0, "Title": "Driver Data Analysis", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 285.0, "LinesInsertedFromPrevious": 8.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 277.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92397416, "KernelVersionId": 69345111, "SourceDatasetVersionId": 984061}]
[{"Id": 984061, "DatasetId": 538444, "DatasourceVersionId": 1012508, "CreatorUserId": 1429656, "LicenseName": "U.S. Government Works", "CreationDate": "03/03/2020 12:48:26", "VersionNumber": 3.0, "Title": "2019 Autonomous Vehicle Disengagement Reports", "Slug": "2019-autonomous-vehicle-disengagement-reports", "Subtitle": "Explore Shortcomings of Current Generation Self-Driving Cars", "Description": "### Context\n\nEach company which hold a permit to test self-driving vehicles in California must report disengagements to the state's Department of Motor Vehicles. The department defines a disengagement as \"a deactivation of the autonomous mode when a failure of the autonomous technology is detected or when the safe operation of the vehicle requires that the autonomous vehicle test driver disengage the autonomous mode and take immediate manual. control of the vehicle.\" This dataset of disengagement reports can highlight where the technology is still struggling. \n\n\n### Content\n\n\"The data files below contain the disengagements and autonomous miles traveled for permit holders who reported testing on California\u2019s public roads between December 1, 2018 and November 30, 2019. Separate data files contain information on companies who received their permit in 2018 and are reporting testing activity in California for the first time (beyond the normal 12-month cycle).\" Each report includes:\n\nManufacturer\nPermit Number\nDATE\nVIN NUMBER\nVEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER(Yes or No)\nDRIVER PRESENT(Yes or No)\nDISENGAGEMENT INITIATED BY(AV System, Test Driver, Remote Operator, or Passenger)\nDISENGAGEMENTLOCATION (Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)\nDESCRIPTION OF FACTS CAUSING DISENGAGEMENT\n\n### Acknowledgements\n\nOriginal data from State of California Department of Motor Vehicles. \nhttps://www.dmv.ca.gov/portal/dmv/detail/vr/autonomous/disengagement_report_2019\n\nBanner photo by Andrew Roberts on Unsplash\nhttps://unsplash.com/photos/6lqk_bNnw_c", "VersionNotes": "Fix file format of 2018-2019_AutonomousVehicleDisengagement", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 538444, "CreatorUserId": 1429656, "OwnerUserId": 1429656.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 984061.0, "CurrentDatasourceVersionId": 1012508.0, "ForumId": 551940, "Type": 2, "CreationDate": "03/03/2020 11:54:29", "LastActivityDate": "03/03/2020", "TotalViews": 8225, "TotalDownloads": 540, "TotalVotes": 19, "TotalKernels": 6}]
[{"Id": 1429656, "UserName": "art12400", "DisplayName": "Art124", "RegisterDate": "11/19/2017", "PerformanceTier": 1}]
import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df1 = pd.read_csv( "../input/2019-autonomous-vehicle-disengagement-reports/2018-19_AutonomousVehicleDisengagementReports(firsttimefilers).csv" ) df2 = pd.read_csv( "../input/2019-autonomous-vehicle-disengagement-reports/2019AutonomousVehicleDisengagementReports.csv" ) # Combining Both DataFrames res = [df1, df2] df = pd.concat(res) # Renaming Columns for Legibility df.columns = [ "Manufacturer", "Permit Number", "Date", "VIN number", "Vehicle Is Capable Of Operating Without A Driver", "Driver Present", "Disengagement Initiated By", "Disengagement Location", "Description Of Facts Causing Disengagement", "N/A", "N/A", ] df.head(20) # Cleaning up dataframe to get rid of string issues such as "Test Driver" vs "test driver", etc. df["Disengagement Location"] = df["Disengagement Location"].replace( "street (high speed)", "street" ) df["Disengagement Location"] = df["Disengagement Location"].replace( "Downtown street", "street" ) df["Disengagement Location"] = df["Disengagement Location"].replace( "Rural", "rural road" ) df["Disengagement Location"] = df["Disengagement Location"].replace( "Parking Lot", "parking facility" ) df["Disengagement Location"] = df["Disengagement Location"].str.lower() df["Disengagement Location"] = df["Disengagement Location"].replace( "downtown street", "street" ) df["Driver Present"] = df["Driver Present"].str.lower() df["Driver Present"] = df["Driver Present"].replace("nan", "no") df["Vehicle Is Capable Of Operating Without A Driver"] = df[ "Vehicle Is Capable Of Operating Without A Driver" ].str.lower() df["Disengagement Initiated By"] = df["Disengagement Initiated By"].replace( "Safety Driver", "test driver" ) df["Disengagement Initiated By"] = df["Disengagement Initiated By"].replace( "Vehicle Operator", "test driver" ) df["Disengagement Initiated By"] = df["Disengagement Initiated By"].str.lower() df["Date"] = df["Date"].replace("1/30.2019", "1/30/2019") df["Date"] = pd.to_datetime(df["Date"]) df = df.set_index("Date") df["Description Of Facts Causing Disengagement"] = df[ "Description Of Facts Causing Disengagement" ].astype(str) df.head(20) # Count Statistics # Number of each stats: print("Number of reports: " + str(df.shape[0])) print("Number of unique manufacturers: " + str(df["Manufacturer"].nunique())) print("Number of unique vehicles: " + str(df["VIN number"].nunique())) print("Number of unique permits: " + str(df["Permit Number"].nunique())) # Disengagement Frequency Over Time dates = df.groupby("Date")["VIN number"].nunique() print(dates) plt.title("Disengagement Frequency Over Time") dates["2018-01-01":"2019-12-31"].resample("Q").count().plot(marker="D") plt.xlabel("Month", fontsize=18) plt.ylabel("Disengagements", fontsize=18) plt.xticks(fontsize=12) plt.yticks(fontsize=12) # Put markers in # Disengagement Initiated By # Disengagement Initiated By catinitiate = df["Disengagement Initiated By"].unique().tolist() catinitiate.pop() plt.figure(figsize=(25, 6)) plt.title("Disengagement Initiated By", fontsize=18) vcountinitiate = df["Disengagement Initiated By"].value_counts().tolist() colors = ["#66b3ff", "#ff9999", "#99ff99", "#ffcc99"] plt.pie(vcountinitiate, labels=catinitiate, autopct="%0.2f%%", colors=colors) plt.axis("equal") centre_circle = plt.Circle((0, 0), 0.70, fc="white") fig = plt.gcf() fig.gca().add_artist(centre_circle) plt.show() # Disengagement Location # Disengagement Location (bar graph instead of pie as more legible) catlocation = 
df["Disengagement Location"].unique().tolist() catlocation.pop(-1) colors1 = [ "#ffcc99", "#99ff99", "#66b3ff", "#ff9999", ] catloc = range(len(catlocation)) plt.figure(figsize=(15, 6)) plt.title("Disengagement Location", fontsize=18) plt.xlabel("Location") plt.ylabel("Frequency") vcountlocation = df["Disengagement Location"].value_counts().tolist() plt.bar(catloc, vcountlocation, width=0.5, color=colors) ax = plt.subplot() ax.set_xticks(catloc) ax.set_xticklabels(catlocation, rotation=30) for i, v in enumerate(vcountlocation): ax.text(i - 0.15, v, str(v), color="black") plt.show() # ISSUE: Not able to data filter the downtown street to combine with street for some reason, need help with that. # Disengagements by Manufacturer # Number of Disengagements categorized by Manufacturer catmanu = df["Manufacturer"].unique().tolist() plt.figure(figsize=(25, 12)) plt.title("Disengagement by Manufacturer", fontsize=18) catmanunum = range(len(catmanu)) vcountmanu = df["Manufacturer"].value_counts().tolist() plt.bar(catmanunum, vcountmanu, width=0.5, color=["#1E90FF"]) plt.xlabel("Manufacturer", fontsize=15) plt.ylabel("Number of Disengagements", fontsize=15) ax = plt.subplot() ax.set_xticks(catmanunum) ax.set_xticklabels(catmanu, rotation=90) for i, v in enumerate(vcountmanu): ax.text(i - 0.25, v, str(v), color="black") plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.show() # Driver Present During Disengagements # Was Driver Present During the Disengagements catdriver = df["Driver Present"].unique().tolist() catdriver.pop() catdriver.pop() catdriver.append("unknown") vcountdriver = df["Driver Present"].value_counts().tolist() plt.figure(figsize=(12, 7)) plt.title("Driver Present During Disengagements") plt.pie(vcountdriver, labels=catdriver, autopct="%0.2f%%", colors=colors) plt.show() # Vehicle Capable Of Operating Without A Driver # Is Vehicle Capable of Operating Without A Driver catcap = df["Vehicle Is Capable Of Operating Without A Driver"].unique().tolist() catcap.pop() catcap.pop() catcap.append("unknown") colors2 = ["#00BFFF", "#ff9999", "#99ff99", "#ffcc99"] vcountcap = ( df["Vehicle Is Capable Of Operating Without A Driver"].value_counts().tolist() ) plt.figure(figsize=(12, 7)) plt.title("Vehicle Capabilities Of Operating Without A Driver", fontsize=18) plt.pie(vcountcap, labels=catcap, autopct="%0.2f%%", colors=colors) centre_circle = plt.Circle((0, 0), 0.70, fc="white") fig = plt.gcf() fig.gca().add_artist(centre_circle) plt.show() # Disengagement Descriptions descriptions = ( df["Description Of Facts Causing Disengagement"] .str.lower() .str.replace("[^\w\s]", "") .replace("[^\n]", " ") ) totalreports = float(len(df.index)) plt.figure(figsize=(25, 12)) c = dict() for description in descriptions: description = description.split(" ") for word in np.unique(description): if word in c: c[word] += 1 else: c[word] = 1 catdescrip1 = [ "software", "hardware", "sensor", "camera", "lidar", "radar", "map", "gps", ] catdescrip2 = [ "planning", "perception", "tracking", "trajectory", "localization", "control", ] catdescrip3 = ["car", "vehicle", "truck", "pedestrians", "bicyclist"] catdescrip4 = ["light", "construction", "traffic", "intersection", "weather", "debris"] leng1 = range(len(catdescrip1)) leng2 = range(len(catdescrip2)) leng3 = range(len(catdescrip3)) leng4 = range(len(catdescrip4)) software = c["software"] hardware = c["hardware"] sensor = c["sensor"] camera = c["camera"] lidar = c["lidar"] radar = c["radar"] maap = c["map"] gps = c["gps"] planning = c["planning"] + c["planned"] 
perception = c["perception"] tracking = c["tracking"] trajectory = c["trajectory"] localization = c["localization"] control = c["control"] + c["controller"] car = c["car"] vehicle = c["vehicle"] truck = c["truck"] pedestrians = c["pedestrians"] bicyclist = c["bicyclist"] light = c["green"] + c["light"] construction = c["construction"] traffic = c["traffic"] intersection = c["intersection"] weather = c["weather"] + c["rain"] debris = c["debris"] listvalue1 = [software, hardware, sensor, camera, lidar, radar, maap, gps] listvalue2 = [planning, perception, tracking, trajectory, localization, control] listvalue3 = [car, vehicle, truck, pedestrians, bicyclist] listvalue4 = [light, construction, traffic, intersection, weather, debris] plt.title("Descriptions of Disengagements One", fontsize=18) plt.bar(leng1, listvalue1, color=["#1E90FF"]) ax = plt.subplot() ax.set_xticks(leng1) ax.set_xticklabels(catdescrip1, rotation=90, fontsize=15) for i, v in enumerate(listvalue1): ax.text(i - 0.25, v, str(v), color="black", fontsize=15) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.show() plt.figure(figsize=(25, 12)) plt.title("Descriptions of Disengagements Two", fontsize=18) plt.bar(leng2, listvalue2, color=["#1E90FF"]) ax = plt.subplot() ax.set_xticks(leng2) ax.set_xticklabels(catdescrip2, rotation=90, fontsize=15) for i, v in enumerate(listvalue2): ax.text(i - 0.25, v, str(v), color="black", fontsize=15) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.show() plt.figure(figsize=(25, 12)) plt.title("Descriptions of Disengagements Three", fontsize=18) plt.bar(leng3, listvalue3, color=["#1E90FF"]) ax = plt.subplot() ax.set_xticks(leng3) ax.set_xticklabels(catdescrip3, rotation=90, fontsize=15) for i, v in enumerate(listvalue3): ax.text(i - 0.25, v, str(v), color="black", fontsize=15) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.show() plt.figure(figsize=(25, 12)) plt.title("Descriptions of Disengagements Four", fontsize=18) plt.bar(leng4, listvalue4, color=["#1E90FF"]) ax = plt.subplot() ax.set_xticks(leng4) ax.set_xticklabels(catdescrip4, rotation=90, fontsize=15) for i, v in enumerate(listvalue4): ax.text(i - 0.25, v, str(v), color="black", fontsize=15) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.show()
[{"2019-autonomous-vehicle-disengagement-reports/2018-19_AutonomousVehicleDisengagementReports(firsttimefilers).csv": {"column_names": "[\"Manufacturer\", \"Permit Number\", \"DATE\", \"VIN NUMBER\", \"VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\\n(Yes or No)\", \"DRIVER PRESENT\\n(Yes or No)\", \"DISENGAGEMENT INITIATED BY\\n(AV System, Test Driver, Remote Operator, or Passenger)\", \"DISENGAGEMENT\\nLOCATION\\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)\", \"DESCRIPTION OF FACTS CAUSING DISENGAGEMENT\", \"Unnamed: 9\", \"Unnamed: 10\"]", "column_data_types": "{\"Manufacturer\": \"object\", \"Permit Number\": \"object\", \"DATE\": \"object\", \"VIN NUMBER\": \"object\", \"VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\\n(Yes or No)\": \"object\", \"DRIVER PRESENT\\n(Yes or No)\": \"object\", \"DISENGAGEMENT INITIATED BY\\n(AV System, Test Driver, Remote Operator, or Passenger)\": \"object\", \"DISENGAGEMENT\\nLOCATION\\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)\": \"object\", \"DESCRIPTION OF FACTS CAUSING DISENGAGEMENT\": \"object\", \"Unnamed: 9\": \"object\", \"Unnamed: 10\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 454 entries, 0 to 453\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Manufacturer 454 non-null object\n 1 Permit Number 454 non-null object\n 2 DATE 454 non-null object\n 3 VIN NUMBER 454 non-null object\n 4 VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No) 454 non-null object\n 5 DRIVER PRESENT\n(Yes or No) 454 non-null object\n 6 DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger) 454 non-null object\n 7 DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility) 454 non-null object\n 8 DESCRIPTION OF FACTS CAUSING DISENGAGEMENT 454 non-null object\n 9 Unnamed: 9 24 non-null object\n 10 Unnamed: 10 1 non-null object\ndtypes: object(11)\nmemory usage: 39.1+ KB\n", "summary": "{\"Manufacturer\": {\"count\": 454, \"unique\": 8, \"top\": \"Intel Corporation\", \"freq\": 165}, \"Permit Number\": {\"count\": 454, \"unique\": 8, \"top\": \"AVT052\", \"freq\": 165}, \"DATE\": {\"count\": 454, \"unique\": 147, \"top\": \"6/7/2018\", \"freq\": 28}, \"VIN NUMBER\": {\"count\": 454, \"unique\": 15, \"top\": \"3FA6P0LU4HR195512\", \"freq\": 154}, \"VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\\n(Yes or No)\": {\"count\": 454, \"unique\": 2, \"top\": \"Yes\", \"freq\": 274}, \"DRIVER PRESENT\\n(Yes or No)\": {\"count\": 454, \"unique\": 1, \"top\": \"Yes\", \"freq\": 454}, \"DISENGAGEMENT INITIATED BY\\n(AV System, Test Driver, Remote Operator, or Passenger)\": {\"count\": 454, \"unique\": 4, \"top\": \"Test Driver\", \"freq\": 393}, \"DISENGAGEMENT\\nLOCATION\\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)\": {\"count\": 454, \"unique\": 4, \"top\": \"Street\", \"freq\": 375}, \"DESCRIPTION OF FACTS CAUSING DISENGAGEMENT\": {\"count\": 454, \"unique\": 104, \"top\": \"Software Discrepancy\", \"freq\": 141}, \"Unnamed: 9\": {\"count\": 24, \"unique\": 12, \"top\": \" On city road in heavy traffic with clear sky during day\", \"freq\": 5}, \"Unnamed: 10\": {\"count\": 1, \"unique\": 1, \"top\": \" On city road in heavy traffic with clear sky during dusk\", \"freq\": 1}}", "examples": "{\"Manufacturer\":{\"0\":\"Ambarella Corp.\",\"1\":\"Ambarella Corp.\",\"2\":\"Ambarella Corp.\",\"3\":\"Ambarella Corp.\"},\"Permit 
Number\":{\"0\":\"AVT053\",\"1\":\"AVT053\",\"2\":\"AVT053\",\"3\":\"AVT053\"},\"DATE\":{\"0\":\"3\\/14\\/2018\",\"1\":\"3\\/14\\/2018\",\"2\":\"3\\/14\\/2018\",\"3\":\"3\\/14\\/2018\"},\"VIN NUMBER\":{\"0\":\"3LN6L5MU7HR609845\",\"1\":\"3LN6L5MU7HR609845\",\"2\":\"3LN6L5MU7HR609845\",\"3\":\"3LN6L5MU7HR609845\"},\"VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\\n(Yes or No)\":{\"0\":\"No\",\"1\":\"No\",\"2\":\"No\",\"3\":\"No\"},\"DRIVER PRESENT\\n(Yes or No)\":{\"0\":\"Yes\",\"1\":\"Yes\",\"2\":\"Yes\",\"3\":\"Yes\"},\"DISENGAGEMENT INITIATED BY\\n(AV System, Test Driver, Remote Operator, or Passenger)\":{\"0\":\"test driver\",\"1\":\"test driver\",\"2\":\"test driver\",\"3\":\"test driver\"},\"DISENGAGEMENT\\nLOCATION\\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)\":{\"0\":\"street\",\"1\":\"street\",\"2\":\"street\",\"3\":\"street\"},\"DESCRIPTION OF FACTS CAUSING DISENGAGEMENT\":{\"0\":\"Unexpected result from the path planner in the given traffic conditions\",\"1\":\"Unexpected result from the radar based perception in the given traffic conditions\",\"2\":\"Unexpected result from the path planner in the given traffic conditions\",\"3\":\"Unexpected result from the GPS system in the given traffic conditions\"},\"Unnamed: 9\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"Unnamed: 10\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null}}"}}, {"2019-autonomous-vehicle-disengagement-reports/2019AutonomousVehicleDisengagementReports.csv": {"column_names": "[\"Manufacturer\", \"Permit Number\", \"DATE\", \"VIN NUMBER\", \"VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\\n(Yes or No)\", \"DRIVER PRESENT\\n(Yes or No)\", \"DISENGAGEMENT INITIATED BY\\n(AV System, Test Driver, Remote Operator, or Passenger)\", \"DISENGAGEMENT\\nLOCATION\\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)\", \"DESCRIPTION OF FACTS CAUSING DISENGAGEMENT\"]", "column_data_types": "{\"Manufacturer\": \"object\", \"Permit Number\": \"object\", \"DATE\": \"object\", \"VIN NUMBER\": \"object\", \"VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\\n(Yes or No)\": \"object\", \"DRIVER PRESENT\\n(Yes or No)\": \"object\", \"DISENGAGEMENT INITIATED BY\\n(AV System, Test Driver, Remote Operator, or Passenger)\": \"object\", \"DISENGAGEMENT\\nLOCATION\\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)\": \"object\", \"DESCRIPTION OF FACTS CAUSING DISENGAGEMENT\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 8885 entries, 0 to 8884\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Manufacturer 8885 non-null object\n 1 Permit Number 8885 non-null object\n 2 DATE 8884 non-null object\n 3 VIN NUMBER 8884 non-null object\n 4 VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No) 8884 non-null object\n 5 DRIVER PRESENT\n(Yes or No) 8884 non-null object\n 6 DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger) 8884 non-null object\n 7 DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility) 8884 non-null object\n 8 DESCRIPTION OF FACTS CAUSING DISENGAGEMENT 8884 non-null object\ndtypes: object(9)\nmemory usage: 624.9+ KB\n", "summary": "{\"Manufacturer\": {\"count\": 8885, \"unique\": 28, \"top\": \"Toyota Research Institute\", \"freq\": 2947}, \"Permit Number\": {\"count\": 8885, \"unique\": 27, \"top\": \"AVT050\", \"freq\": 2947}, \"DATE\": {\"count\": 8884, \"unique\": 3711, \"top\": \"3/28/2019\", 
\"freq\": 59}, \"VIN NUMBER\": {\"count\": 8884, \"unique\": 289, \"top\": \"JTHDU1EF3G5020098\", \"freq\": 900}, \"VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\\n(Yes or No)\": {\"count\": 8884, \"unique\": 5, \"top\": \"No\", \"freq\": 4369}, \"DRIVER PRESENT\\n(Yes or No)\": {\"count\": 8884, \"unique\": 4, \"top\": \"Yes\", \"freq\": 4934}, \"DISENGAGEMENT INITIATED BY\\n(AV System, Test Driver, Remote Operator, or Passenger)\": {\"count\": 8884, \"unique\": 4, \"top\": \"Test Driver\", \"freq\": 6037}, \"DISENGAGEMENT\\nLOCATION\\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)\": {\"count\": 8884, \"unique\": 11, \"top\": \"Street\", \"freq\": 4668}, \"DESCRIPTION OF FACTS CAUSING DISENGAGEMENT\": {\"count\": 8884, \"unique\": 469, \"top\": \"Safety Driver proactive disengagement.\", \"freq\": 1780}}", "examples": "{\"Manufacturer\":{\"0\":\"AImotive Inc.\",\"1\":\"AImotive Inc.\",\"2\":\"AImotive Inc.\",\"3\":\"AImotive Inc.\"},\"Permit Number\":{\"0\":\"AVT003\",\"1\":\"AVT003\",\"2\":\"AVT003\",\"3\":\"AVT003\"},\"DATE\":{\"0\":\"12.06.2018\",\"1\":\"12.10.2018\",\"2\":\"12.10.2018\",\"3\":\"04.23.2019\"},\"VIN NUMBER\":{\"0\":\"JTDKN3DU5A1092792\",\"1\":\"JTDKN3DU5A1092792\",\"2\":\"JTDKN3DU5A1092792\",\"3\":\"JTDKN3DU5A1092792\"},\"VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\\n(Yes or No)\":{\"0\":\"No\",\"1\":\"No\",\"2\":\"No\",\"3\":\"No\"},\"DRIVER PRESENT\\n(Yes or No)\":{\"0\":\"Yes\",\"1\":\"Yes\",\"2\":\"Yes\",\"3\":\"Yes\"},\"DISENGAGEMENT INITIATED BY\\n(AV System, Test Driver, Remote Operator, or Passenger)\":{\"0\":\"Test Driver\",\"1\":\"Test Driver\",\"2\":\"Test Driver\",\"3\":\"Test Driver\"},\"DISENGAGEMENT\\nLOCATION\\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)\":{\"0\":\"Freeway\",\"1\":\"Freeway\",\"2\":\"Freeway\",\"3\":\"Freeway\"},\"DESCRIPTION OF FACTS CAUSING DISENGAGEMENT\":{\"0\":\"Lane change maneuver: risk of lane departure, caused by unstable target lane model\",\"1\":\"Lane change maneuver: risk of lane departure, caused by unstable target lane model\",\"2\":\"Lane change maneuver: risk of lane departure, caused by unstable target lane model\",\"3\":\"Lane change maneuver: risk of lane departure, caused by overshooting trajectory planning\"}}"}}]
true
2
<start_data_description><data_path>2019-autonomous-vehicle-disengagement-reports/2018-19_AutonomousVehicleDisengagementReports(firsttimefilers).csv: <column_names> ['Manufacturer', 'Permit Number', 'DATE', 'VIN NUMBER', 'VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)', 'DRIVER PRESENT\n(Yes or No)', 'DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)', 'DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)', 'DESCRIPTION OF FACTS CAUSING DISENGAGEMENT', 'Unnamed: 9', 'Unnamed: 10'] <column_types> {'Manufacturer': 'object', 'Permit Number': 'object', 'DATE': 'object', 'VIN NUMBER': 'object', 'VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)': 'object', 'DRIVER PRESENT\n(Yes or No)': 'object', 'DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)': 'object', 'DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)': 'object', 'DESCRIPTION OF FACTS CAUSING DISENGAGEMENT': 'object', 'Unnamed: 9': 'object', 'Unnamed: 10': 'object'} <dataframe_Summary> {'Manufacturer': {'count': 454, 'unique': 8, 'top': 'Intel Corporation', 'freq': 165}, 'Permit Number': {'count': 454, 'unique': 8, 'top': 'AVT052', 'freq': 165}, 'DATE': {'count': 454, 'unique': 147, 'top': '6/7/2018', 'freq': 28}, 'VIN NUMBER': {'count': 454, 'unique': 15, 'top': '3FA6P0LU4HR195512', 'freq': 154}, 'VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)': {'count': 454, 'unique': 2, 'top': 'Yes', 'freq': 274}, 'DRIVER PRESENT\n(Yes or No)': {'count': 454, 'unique': 1, 'top': 'Yes', 'freq': 454}, 'DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)': {'count': 454, 'unique': 4, 'top': 'Test Driver', 'freq': 393}, 'DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)': {'count': 454, 'unique': 4, 'top': 'Street', 'freq': 375}, 'DESCRIPTION OF FACTS CAUSING DISENGAGEMENT': {'count': 454, 'unique': 104, 'top': 'Software Discrepancy', 'freq': 141}, 'Unnamed: 9': {'count': 24, 'unique': 12, 'top': ' On city road in heavy traffic with clear sky during day', 'freq': 5}, 'Unnamed: 10': {'count': 1, 'unique': 1, 'top': ' On city road in heavy traffic with clear sky during dusk', 'freq': 1}} <dataframe_info> RangeIndex: 454 entries, 0 to 453 Data columns (total 11 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Manufacturer 454 non-null object 1 Permit Number 454 non-null object 2 DATE 454 non-null object 3 VIN NUMBER 454 non-null object 4 VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER (Yes or No) 454 non-null object 5 DRIVER PRESENT (Yes or No) 454 non-null object 6 DISENGAGEMENT INITIATED BY (AV System, Test Driver, Remote Operator, or Passenger) 454 non-null object 7 DISENGAGEMENT LOCATION (Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility) 454 non-null object 8 DESCRIPTION OF FACTS CAUSING DISENGAGEMENT 454 non-null object 9 Unnamed: 9 24 non-null object 10 Unnamed: 10 1 non-null object dtypes: object(11) memory usage: 39.1+ KB <some_examples> {'Manufacturer': {'0': 'Ambarella Corp.', '1': 'Ambarella Corp.', '2': 'Ambarella Corp.', '3': 'Ambarella Corp.'}, 'Permit Number': {'0': 'AVT053', '1': 'AVT053', '2': 'AVT053', '3': 'AVT053'}, 'DATE': {'0': '3/14/2018', '1': '3/14/2018', '2': '3/14/2018', '3': '3/14/2018'}, 'VIN NUMBER': {'0': '3LN6L5MU7HR609845', '1': '3LN6L5MU7HR609845', '2': '3LN6L5MU7HR609845', '3': '3LN6L5MU7HR609845'}, 
'VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)': {'0': 'No', '1': 'No', '2': 'No', '3': 'No'}, 'DRIVER PRESENT\n(Yes or No)': {'0': 'Yes', '1': 'Yes', '2': 'Yes', '3': 'Yes'}, 'DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)': {'0': 'test driver', '1': 'test driver', '2': 'test driver', '3': 'test driver'}, 'DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)': {'0': 'street', '1': 'street', '2': 'street', '3': 'street'}, 'DESCRIPTION OF FACTS CAUSING DISENGAGEMENT': {'0': 'Unexpected result from the path planner in the given traffic conditions', '1': 'Unexpected result from the radar based perception in the given traffic conditions', '2': 'Unexpected result from the path planner in the given traffic conditions', '3': 'Unexpected result from the GPS system in the given traffic conditions'}, 'Unnamed: 9': {'0': None, '1': None, '2': None, '3': None}, 'Unnamed: 10': {'0': None, '1': None, '2': None, '3': None}} <end_description> <start_data_description><data_path>2019-autonomous-vehicle-disengagement-reports/2019AutonomousVehicleDisengagementReports.csv: <column_names> ['Manufacturer', 'Permit Number', 'DATE', 'VIN NUMBER', 'VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)', 'DRIVER PRESENT\n(Yes or No)', 'DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)', 'DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)', 'DESCRIPTION OF FACTS CAUSING DISENGAGEMENT'] <column_types> {'Manufacturer': 'object', 'Permit Number': 'object', 'DATE': 'object', 'VIN NUMBER': 'object', 'VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)': 'object', 'DRIVER PRESENT\n(Yes or No)': 'object', 'DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)': 'object', 'DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)': 'object', 'DESCRIPTION OF FACTS CAUSING DISENGAGEMENT': 'object'} <dataframe_Summary> {'Manufacturer': {'count': 8885, 'unique': 28, 'top': 'Toyota Research Institute', 'freq': 2947}, 'Permit Number': {'count': 8885, 'unique': 27, 'top': 'AVT050', 'freq': 2947}, 'DATE': {'count': 8884, 'unique': 3711, 'top': '3/28/2019', 'freq': 59}, 'VIN NUMBER': {'count': 8884, 'unique': 289, 'top': 'JTHDU1EF3G5020098', 'freq': 900}, 'VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)': {'count': 8884, 'unique': 5, 'top': 'No', 'freq': 4369}, 'DRIVER PRESENT\n(Yes or No)': {'count': 8884, 'unique': 4, 'top': 'Yes', 'freq': 4934}, 'DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)': {'count': 8884, 'unique': 4, 'top': 'Test Driver', 'freq': 6037}, 'DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)': {'count': 8884, 'unique': 11, 'top': 'Street', 'freq': 4668}, 'DESCRIPTION OF FACTS CAUSING DISENGAGEMENT': {'count': 8884, 'unique': 469, 'top': 'Safety Driver proactive disengagement.', 'freq': 1780}} <dataframe_info> RangeIndex: 8885 entries, 0 to 8884 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Manufacturer 8885 non-null object 1 Permit Number 8885 non-null object 2 DATE 8884 non-null object 3 VIN NUMBER 8884 non-null object 4 VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER (Yes or No) 8884 non-null object 5 DRIVER PRESENT (Yes or No) 8884 non-null object 6 DISENGAGEMENT INITIATED BY (AV System, Test Driver, 
Remote Operator, or Passenger) 8884 non-null object 7 DISENGAGEMENT LOCATION (Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility) 8884 non-null object 8 DESCRIPTION OF FACTS CAUSING DISENGAGEMENT 8884 non-null object dtypes: object(9) memory usage: 624.9+ KB <some_examples> {'Manufacturer': {'0': 'AImotive Inc.', '1': 'AImotive Inc.', '2': 'AImotive Inc.', '3': 'AImotive Inc.'}, 'Permit Number': {'0': 'AVT003', '1': 'AVT003', '2': 'AVT003', '3': 'AVT003'}, 'DATE': {'0': '12.06.2018', '1': '12.10.2018', '2': '12.10.2018', '3': '04.23.2019'}, 'VIN NUMBER': {'0': 'JTDKN3DU5A1092792', '1': 'JTDKN3DU5A1092792', '2': 'JTDKN3DU5A1092792', '3': 'JTDKN3DU5A1092792'}, 'VEHICLE IS CAPABLE OF OPERATING WITHOUT A DRIVER\n(Yes or No)': {'0': 'No', '1': 'No', '2': 'No', '3': 'No'}, 'DRIVER PRESENT\n(Yes or No)': {'0': 'Yes', '1': 'Yes', '2': 'Yes', '3': 'Yes'}, 'DISENGAGEMENT INITIATED BY\n(AV System, Test Driver, Remote Operator, or Passenger)': {'0': 'Test Driver', '1': 'Test Driver', '2': 'Test Driver', '3': 'Test Driver'}, 'DISENGAGEMENT\nLOCATION\n(Interstate, Freeway, Highway, Rural Road, Street, or Parking Facility)': {'0': 'Freeway', '1': 'Freeway', '2': 'Freeway', '3': 'Freeway'}, 'DESCRIPTION OF FACTS CAUSING DISENGAGEMENT': {'0': 'Lane change maneuver: risk of lane departure, caused by unstable target lane model', '1': 'Lane change maneuver: risk of lane departure, caused by unstable target lane model', '2': 'Lane change maneuver: risk of lane departure, caused by unstable target lane model', '3': 'Lane change maneuver: risk of lane departure, caused by overshooting trajectory planning'}} <end_description>
3,310
0
6,290
3,310
69345317
# ### Imports & Loading Data import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) train = pd.read_csv("/kaggle/input/titanic/train.csv", index_col="PassengerId") test = pd.read_csv("/kaggle/input/titanic/test.csv", index_col="PassengerId") # ### Exploring Data and EDA # Show train dataframe train.head() # Show test dataframe test.head() # dfs info. train.info() test.info() # Check NANs in Train Dataset train.isnull().sum() # firstly, we can drop the 2 records with NaN Embarked value train = train[train["Embarked"].notnull()] train.isnull().sum() # - We will have to deal with `Age`. # - `Cabin` feature is filled with NaNs and useless feature. # Check NANs in Test Dataset test.isnull().sum() # - We will have to deal with `Age`, `Fare` and `Cabin` NaNs # Check Duplicates train.duplicated().sum() test.duplicated().sum() # - We have `no duplicated` records # Survived vs Not train["Survived"].value_counts() / train.shape[0] # Survived vs Not plt.figure(figsize=(7, 7)) plt.pie(train["Survived"].value_counts(), labels=["Not Survived", "Survived"]) # - 61.8% Survived and 38.2% Not Survived train["Pclass"].value_counts() train["Pclass"].value_counts().plot(kind="barh") plt.title("Number of Passengers in Each Class") # - Most Passengers were in class 3 train.groupby(["Pclass"])["Survived"].value_counts(sort=False) train.groupby(["Pclass"])["Survived"].value_counts(sort=False).plot(kind="barh") plt.title("Survived vs Not Survived in Each Class") # - It seems that tha `most` of `class 3` `died`, and `high number` of `class 1 survived`. # - This feature will be `so important` in prediction. train.groupby(["Sex"])["Survived"].value_counts(sort=False) train.groupby(["Sex"])["Survived"].value_counts(sort=False).plot(kind="barh") plt.title("Survived vs Not Survived for Each Sex") # - It seems that `most females Survived`, but `most males died`. # - This feature will be `so important` in prediction. train.groupby(["SibSp"])["Survived"].value_counts(sort=False) train.groupby(["SibSp"])["Survived"].value_counts(sort=False).plot(kind="barh") plt.title("Survived vs Not Survived for Number of Siblings or Spouses") # - Almost `all passengers` with `high` number of `Siblings or Spouses died`. # - Most of passengers with `zero Siblings or Spouses` also `died`. # - In between the numbers are close. train.groupby(["Parch"])["Survived"].value_counts(sort=False) train.groupby(["Parch"])["Survived"].value_counts(sort=False).plot(kind="barh") plt.title("Survived vs Not Survived for Number of Parents or Children") # - Most of passengers number of Parents or Children above 3 `died`. # - Most of passengers with `zero Parents or Children` also `died`. # - In between the numbers are close. # - Number of Siblings or Spouses & Parents or Children can be `mixed` and give a valuable feature of `number of family`. train["Fare"].astype(int).value_counts(sort=False) train["Fare"].astype(int).describe() # - Although it's `not logical` that `15 passengers` travelled with `0 fare`, we can `change` the fare into catagories `describe the affordability status` of each passenger. 
train["Embarked"].value_counts() / train.shape[0] train.groupby(["Embarked"])["Survived"].value_counts(sort=False) train.groupby(["Embarked"])["Survived"].value_counts(sort=False).plot(kind="barh") plt.title("Survived vs Not Survived for Each Port of Embarkation") # - Number of survivore and deads for each Port of Embarkation are `close`, but `most of Southampton Passengers died`. train.groupby(["Pclass"])["Age"].describe() train.groupby(["Pclass"])["Age"].describe() # - Ages has outliers, so it can be `imputed using median` not mean. # ### Cleaning Datasets of NaNs & Useless Feature # Taking copies train_copy = train.copy() test_copy = test.copy() # Age imputed using median age of each Pclass for i in range(1, 4): train_copy.loc[ (train_copy["Pclass"] == i) & (train_copy["Age"].isnull()), "Age" ] = train_copy.loc[train_copy["Pclass"] == i, "Age"].median() test_copy.loc[ (test_copy["Pclass"] == i) & (test_copy["Age"].isnull()), "Age" ] = test_copy.loc[test_copy["Pclass"] == i, "Age"].median() train_copy.isnull().sum() test_copy.isnull().sum() # Test Fare imputed using mean fare test_copy["Fare"].fillna(test_copy["Fare"].mean(), inplace=True) test_copy.isnull().sum() # Dropping Cabin feature train_copy.drop("Cabin", axis=1, inplace=True) test_copy.drop("Cabin", axis=1, inplace=True) train_copy.isnull().sum() test_copy.isnull().sum() # ### Data Pre-Processing # Tickets differ, each ticket gives an indication for the class, place and cabin by some-how # So, we can convert it's numbers length to a feature train_copy["Ticket_Type"] = train_copy["Ticket"].str.extract(r"(\d{3,8})") test_copy["Ticket_Type"] = test_copy["Ticket"].str.extract(r"(\d{3,8})") train_copy["Ticket_Type"] = train_copy["Ticket_Type"].apply(lambda x: len(str(x))) test_copy["Ticket_Type"] = test_copy["Ticket_Type"].apply(lambda x: len(str(x))) train_copy.info() test_copy.isnull().sum() # Drop Ticket train_copy.drop("Ticket", axis=1, inplace=True) test_copy.drop("Ticket", axis=1, inplace=True) # We can Extract Titles of Name train_copy["Title"] = train_copy["Name"].str.extract(r",\s(\w+)") test_copy["Title"] = test_copy["Name"].str.extract(r",\s(\w+)") train_copy["Title"].value_counts() train_copy["Title"] = train_copy["Title"].replace( [ "Dr", "Rev", "Major", "Col", "Lady", "Sir", "Jonkheer", "the", "Don", "Capt", "Ms", ], "Others", ) train_copy["Title"] = train_copy["Title"].replace(["Mlle", "Mme"], "Mrs") train_copy["Title"].value_counts() test_copy["Title"].value_counts() test_copy["Title"] = test_copy["Title"].replace( ["Dr", "Rev", "Col", "Dona", "Ms"], "Others" ) test_copy["Title"].value_counts() # Drop Name Feature train_copy.drop("Name", axis=1, inplace=True) test_copy.drop("Name", axis=1, inplace=True) # we can get a Married_Ladies with children out of Title and Parch Features as they have the priority in Life saving train_copy["Married_Lady"] = train_copy["Title"].apply(lambda x: 1 if x == "Mrs" else 0) test_copy["Married_Lady"] = test_copy["Title"].apply(lambda x: 1 if x == "Mrs" else 0) train_copy["With_Children"] = train_copy["Parch"].apply(lambda x: 1 if x > 0 else 0) test_copy["With_Children"] = test_copy["Parch"].apply(lambda x: 1 if x > 0 else 0) train_copy["Married_With_Childern"] = ( train_copy["Married_Lady"] + train_copy["With_Children"] ) test_copy["Married_With_Childern"] = ( test_copy["Married_Lady"] + test_copy["With_Children"] ) train_copy["Married_With_Childern"] = train_copy["Married_With_Childern"].apply( lambda x: 1 if x == 2 else 0 ) test_copy["Married_With_Childern"] = 
test_copy["Married_With_Childern"].apply( lambda x: 1 if x == 2 else 0 ) # Drop With_Children feature train_copy.drop("With_Children", axis=1, inplace=True) test_copy.drop("With_Children", axis=1, inplace=True) train_copy.info() # We can get number of one family on Board train_copy["Family_Members"] = train_copy["SibSp"] + train_copy["Parch"] + 1 test_copy["Family_Members"] = test_copy["SibSp"] + test_copy["Parch"] + 1 train_copy["Family_Members"].value_counts() # We can get if its single or not train_copy["Single"] = train_copy["Family_Members"].apply(lambda x: 1 if x == 1 else 0) test_copy["Single"] = test_copy["Family_Members"].apply(lambda x: 1 if x == 1 else 0) # Drop SibSp and Parch column train_copy.drop(["SibSp", "Parch"], axis=1, inplace=True) test_copy.drop(["SibSp", "Parch"], axis=1, inplace=True) train_copy.info() # We can get if Passenger is Old or Child or not, they have a priority in saving train_copy["Old"] = train_copy["Age"].apply(lambda x: 1 if x >= 55 else 0) test_copy["Old"] = test_copy["Age"].apply(lambda x: 1 if x >= 55 else 0) train_copy["Child"] = train_copy["Age"].apply(lambda x: 1 if x <= 10 else 0) test_copy["Child"] = test_copy["Age"].apply(lambda x: 1 if x <= 10 else 0) # We can get if Passenger is rich or not, they have better chance to be in open air cabins, so better chance to save train_copy["Rich"] = train_copy["Fare"].apply(lambda x: 1 if x >= 200 else 0) test_copy["Rich"] = test_copy["Fare"].apply(lambda x: 1 if x >= 200 else 0) # ### Catagorized Columns Encoding Catagorical_features = list(train_copy.select_dtypes(include=object).columns) Catagorical_features # OneHot Encoding OneHot_Encoded_train = pd.get_dummies(train_copy) OneHot_Encoded_test = pd.get_dummies(test_copy) OneHot_Encoded_train.info() OneHot_Encoded_test.info() # Labels Encoding Labels_Encoded_train = train_copy.copy() Labels_Encoded_test = test_copy.copy() for column in Catagorical_features: Labels_Encoded_train[column] = pd.factorize(Labels_Encoded_train[column])[ 0 ].reshape(-1, 1) Labels_Encoded_test[column] = pd.factorize(Labels_Encoded_test[column])[0].reshape( -1, 1 ) Labels_Encoded_train.info() Labels_Encoded_test.info() # ### Model 1 from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, precision_score, recall_score x = OneHot_Encoded_train.drop("Survived", axis=1) y = OneHot_Encoded_train["Survived"] x_train, x_valid, y_train, y_valid = train_test_split( x, y, test_size=0.2, stratify=y, random_state=42 ) # Model Using different Solvers LogisticRegressionSolvers = ["liblinear", "newton-cg", "lbfgs", "sag", "saga"] for solver in LogisticRegressionSolvers: model = LogisticRegression(solver=solver, max_iter=10000) model.fit(x_train, y_train) y_predict = model.predict(x_valid) print(f'Results of Logistic Regression Model 1 USing "{solver}" Solver') print("Model Accuracy = {}".format(accuracy_score(y_valid, y_predict))) print("Model Precision = {}".format(precision_score(y_valid, y_predict))) print("Model Recall = {}".format(recall_score(y_valid, y_predict))) print("") # ##### Model using `"liblinear", "newton-cg" and "lbfgs" Solvers` are so close. 
# Using GridSearchCV instead of Iteration like before from sklearn.model_selection import GridSearchCV LogisticRegressionSolvers = ["liblinear", "newton-cg", "lbfgs", "sag", "saga"] iteration = [100, 250, 500, 1000, 2000, 5000, 10000] parameters_grid = {"solver": LogisticRegressionSolvers, "max_iter": iteration} GridSearchResult = GridSearchCV( LogisticRegression(), parameters_grid, scoring=["accuracy", "precision", "recall"], refit="accuracy", return_train_score=True, cv=4, ) GridSearchResult.fit(x, y) print(GridSearchResult.best_params_) print(GridSearchResult.best_score_) print(GridSearchResult.best_estimator_) # ### Model 2 # ##### Same as Model 1 except using Labeled Encoded data to see if the different Encoding can change the performance x = Labels_Encoded_train.drop("Survived", axis=1) y = Labels_Encoded_train["Survived"] x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=0.2, stratify=y) # Model Using different Solvers LogisticRegressionSolvers = ["liblinear", "newton-cg", "lbfgs", "sag", "saga"] for solver in LogisticRegressionSolvers: model = LogisticRegression(solver=solver, max_iter=10000) model.fit(x_train, y_train) y_predict = model.predict(x_valid) print(f'Results of Logistic Regression Model 2 USing "{solver}" Solver') print("Model Accuracy = {}".format(accuracy_score(y_valid, y_predict))) print("Model Precision = {}".format(precision_score(y_valid, y_predict))) print("Model Recall = {}".format(recall_score(y_valid, y_predict))) print("") # ##### It seems that `Label Encoding` is almost the same as `OneHot Encoding` in this case. # Using GridSearchCV instead of Iteration like before GridSearchResult.fit(x, y) print(GridSearchResult.best_params_) print(GridSearchResult.best_score_) print(GridSearchResult.best_estimator_) # ### Model 3 # ##### Trying to use valuable features that may enhance the model OneHot_Encoded_train.columns selected_features = [ "Pclass", "Age", "Married_With_Childern", "Family_Members", "Single", "Child", "Sex_female", "Sex_male", "Title_Master", "Title_Miss", "Title_Mr", "Title_Mrs", "Title_Others", ] # Selected Datasets selected_train = OneHot_Encoded_train[selected_features + ["Survived"]] selected_test = OneHot_Encoded_test[selected_features] x = selected_train.drop("Survived", axis=1) y = selected_train["Survived"] x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=0.2, stratify=y) # Model Using different Solvers LogisticRegressionSolvers = ["liblinear", "newton-cg", "lbfgs", "sag", "saga"] for solver in LogisticRegressionSolvers: model = LogisticRegression(solver=solver, max_iter=10000) model.fit(x_train, y_train) y_predict = model.predict(x_valid) print(f'Results of Logistic Regression Model 2 USing "{solver}" Solver') print("Model Accuracy = {}".format(accuracy_score(y_valid, y_predict))) print("Model Precision = {}".format(precision_score(y_valid, y_predict))) print("Model Recall = {}".format(recall_score(y_valid, y_predict))) print("") # Using GridSearchCV instead of Iteration like before GridSearchResult.fit(x, y) print(GridSearchResult.best_params_) print(GridSearchResult.best_score_) print(GridSearchResult.best_estimator_) # ### Result # - As shown below, the best model we have got is the `third model`. best_model = LogisticRegression(solver="liblinear", max_iter=100) best_model.fit(x, y) test_predict = best_model.predict(selected_test) # Saving test predictions to file output = pd.DataFrame({"PassengerId": selected_test.index, "Survived": test_predict}) output.to_csv("submission.csv", index=False)
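# To see which of the selected features drive the final model, the fitted coefficients
# can be ranked: positive values push predictions toward Survived, negative toward Not
# Survived. A sketch assuming `best_model` and the selected-feature frame `x` fitted
# above; the comparison is only rough because Age is not on the same scale as the
# binary indicator features.
coefficients = pd.Series(best_model.coef_[0], index=x.columns).sort_values()
print(coefficients)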
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/345/69345317.ipynb
null
null
[{"Id": 69345317, "ScriptId": 18932948, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7772887, "CreationDate": "07/29/2021 21:00:42", "VersionNumber": 1.0, "Title": "Titanic Kaggle Competition - Wssam Hassan", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 388.0, "LinesInsertedFromPrevious": 388.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
# ### Imports & Loading Data import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) train = pd.read_csv("/kaggle/input/titanic/train.csv", index_col="PassengerId") test = pd.read_csv("/kaggle/input/titanic/test.csv", index_col="PassengerId") # ### Exploring Data and EDA # Show train dataframe train.head() # Show test dataframe test.head() # dfs info. train.info() test.info() # Check NANs in Train Dataset train.isnull().sum() # firstly, we can drop the 2 records with NaN Embarked value train = train[train["Embarked"].notnull()] train.isnull().sum() # - We will have to deal with `Age`. # - `Cabin` feature is filled with NaNs and useless feature. # Check NANs in Test Dataset test.isnull().sum() # - We will have to deal with `Age`, `Fare` and `Cabin` NaNs # Check Duplicates train.duplicated().sum() test.duplicated().sum() # - We have `no duplicated` records # Survived vs Not train["Survived"].value_counts() / train.shape[0] # Survived vs Not plt.figure(figsize=(7, 7)) plt.pie(train["Survived"].value_counts(), labels=["Not Survived", "Survived"]) # - 61.8% Survived and 38.2% Not Survived train["Pclass"].value_counts() train["Pclass"].value_counts().plot(kind="barh") plt.title("Number of Passengers in Each Class") # - Most Passengers were in class 3 train.groupby(["Pclass"])["Survived"].value_counts(sort=False) train.groupby(["Pclass"])["Survived"].value_counts(sort=False).plot(kind="barh") plt.title("Survived vs Not Survived in Each Class") # - It seems that tha `most` of `class 3` `died`, and `high number` of `class 1 survived`. # - This feature will be `so important` in prediction. train.groupby(["Sex"])["Survived"].value_counts(sort=False) train.groupby(["Sex"])["Survived"].value_counts(sort=False).plot(kind="barh") plt.title("Survived vs Not Survived for Each Sex") # - It seems that `most females Survived`, but `most males died`. # - This feature will be `so important` in prediction. train.groupby(["SibSp"])["Survived"].value_counts(sort=False) train.groupby(["SibSp"])["Survived"].value_counts(sort=False).plot(kind="barh") plt.title("Survived vs Not Survived for Number of Siblings or Spouses") # - Almost `all passengers` with `high` number of `Siblings or Spouses died`. # - Most of passengers with `zero Siblings or Spouses` also `died`. # - In between the numbers are close. train.groupby(["Parch"])["Survived"].value_counts(sort=False) train.groupby(["Parch"])["Survived"].value_counts(sort=False).plot(kind="barh") plt.title("Survived vs Not Survived for Number of Parents or Children") # - Most of passengers number of Parents or Children above 3 `died`. # - Most of passengers with `zero Parents or Children` also `died`. # - In between the numbers are close. # - Number of Siblings or Spouses & Parents or Children can be `mixed` and give a valuable feature of `number of family`. train["Fare"].astype(int).value_counts(sort=False) train["Fare"].astype(int).describe() # - Although it's `not logical` that `15 passengers` travelled with `0 fare`, we can `change` the fare into catagories `describe the affordability status` of each passenger. 
train["Embarked"].value_counts() / train.shape[0] train.groupby(["Embarked"])["Survived"].value_counts(sort=False) train.groupby(["Embarked"])["Survived"].value_counts(sort=False).plot(kind="barh") plt.title("Survived vs Not Survived for Each Port of Embarkation") # - Number of survivore and deads for each Port of Embarkation are `close`, but `most of Southampton Passengers died`. train.groupby(["Pclass"])["Age"].describe() train.groupby(["Pclass"])["Age"].describe() # - Ages has outliers, so it can be `imputed using median` not mean. # ### Cleaning Datasets of NaNs & Useless Feature # Taking copies train_copy = train.copy() test_copy = test.copy() # Age imputed using median age of each Pclass for i in range(1, 4): train_copy.loc[ (train_copy["Pclass"] == i) & (train_copy["Age"].isnull()), "Age" ] = train_copy.loc[train_copy["Pclass"] == i, "Age"].median() test_copy.loc[ (test_copy["Pclass"] == i) & (test_copy["Age"].isnull()), "Age" ] = test_copy.loc[test_copy["Pclass"] == i, "Age"].median() train_copy.isnull().sum() test_copy.isnull().sum() # Test Fare imputed using mean fare test_copy["Fare"].fillna(test_copy["Fare"].mean(), inplace=True) test_copy.isnull().sum() # Dropping Cabin feature train_copy.drop("Cabin", axis=1, inplace=True) test_copy.drop("Cabin", axis=1, inplace=True) train_copy.isnull().sum() test_copy.isnull().sum() # ### Data Pre-Processing # Tickets differ, each ticket gives an indication for the class, place and cabin by some-how # So, we can convert it's numbers length to a feature train_copy["Ticket_Type"] = train_copy["Ticket"].str.extract(r"(\d{3,8})") test_copy["Ticket_Type"] = test_copy["Ticket"].str.extract(r"(\d{3,8})") train_copy["Ticket_Type"] = train_copy["Ticket_Type"].apply(lambda x: len(str(x))) test_copy["Ticket_Type"] = test_copy["Ticket_Type"].apply(lambda x: len(str(x))) train_copy.info() test_copy.isnull().sum() # Drop Ticket train_copy.drop("Ticket", axis=1, inplace=True) test_copy.drop("Ticket", axis=1, inplace=True) # We can Extract Titles of Name train_copy["Title"] = train_copy["Name"].str.extract(r",\s(\w+)") test_copy["Title"] = test_copy["Name"].str.extract(r",\s(\w+)") train_copy["Title"].value_counts() train_copy["Title"] = train_copy["Title"].replace( [ "Dr", "Rev", "Major", "Col", "Lady", "Sir", "Jonkheer", "the", "Don", "Capt", "Ms", ], "Others", ) train_copy["Title"] = train_copy["Title"].replace(["Mlle", "Mme"], "Mrs") train_copy["Title"].value_counts() test_copy["Title"].value_counts() test_copy["Title"] = test_copy["Title"].replace( ["Dr", "Rev", "Col", "Dona", "Ms"], "Others" ) test_copy["Title"].value_counts() # Drop Name Feature train_copy.drop("Name", axis=1, inplace=True) test_copy.drop("Name", axis=1, inplace=True) # we can get a Married_Ladies with children out of Title and Parch Features as they have the priority in Life saving train_copy["Married_Lady"] = train_copy["Title"].apply(lambda x: 1 if x == "Mrs" else 0) test_copy["Married_Lady"] = test_copy["Title"].apply(lambda x: 1 if x == "Mrs" else 0) train_copy["With_Children"] = train_copy["Parch"].apply(lambda x: 1 if x > 0 else 0) test_copy["With_Children"] = test_copy["Parch"].apply(lambda x: 1 if x > 0 else 0) train_copy["Married_With_Childern"] = ( train_copy["Married_Lady"] + train_copy["With_Children"] ) test_copy["Married_With_Childern"] = ( test_copy["Married_Lady"] + test_copy["With_Children"] ) train_copy["Married_With_Childern"] = train_copy["Married_With_Childern"].apply( lambda x: 1 if x == 2 else 0 ) test_copy["Married_With_Childern"] = 
test_copy["Married_With_Childern"].apply( lambda x: 1 if x == 2 else 0 ) # Drop With_Children feature train_copy.drop("With_Children", axis=1, inplace=True) test_copy.drop("With_Children", axis=1, inplace=True) train_copy.info() # We can get number of one family on Board train_copy["Family_Members"] = train_copy["SibSp"] + train_copy["Parch"] + 1 test_copy["Family_Members"] = test_copy["SibSp"] + test_copy["Parch"] + 1 train_copy["Family_Members"].value_counts() # We can get if its single or not train_copy["Single"] = train_copy["Family_Members"].apply(lambda x: 1 if x == 1 else 0) test_copy["Single"] = test_copy["Family_Members"].apply(lambda x: 1 if x == 1 else 0) # Drop SibSp and Parch column train_copy.drop(["SibSp", "Parch"], axis=1, inplace=True) test_copy.drop(["SibSp", "Parch"], axis=1, inplace=True) train_copy.info() # We can get if Passenger is Old or Child or not, they have a priority in saving train_copy["Old"] = train_copy["Age"].apply(lambda x: 1 if x >= 55 else 0) test_copy["Old"] = test_copy["Age"].apply(lambda x: 1 if x >= 55 else 0) train_copy["Child"] = train_copy["Age"].apply(lambda x: 1 if x <= 10 else 0) test_copy["Child"] = test_copy["Age"].apply(lambda x: 1 if x <= 10 else 0) # We can get if Passenger is rich or not, they have better chance to be in open air cabins, so better chance to save train_copy["Rich"] = train_copy["Fare"].apply(lambda x: 1 if x >= 200 else 0) test_copy["Rich"] = test_copy["Fare"].apply(lambda x: 1 if x >= 200 else 0) # ### Catagorized Columns Encoding Catagorical_features = list(train_copy.select_dtypes(include=object).columns) Catagorical_features # OneHot Encoding OneHot_Encoded_train = pd.get_dummies(train_copy) OneHot_Encoded_test = pd.get_dummies(test_copy) OneHot_Encoded_train.info() OneHot_Encoded_test.info() # Labels Encoding Labels_Encoded_train = train_copy.copy() Labels_Encoded_test = test_copy.copy() for column in Catagorical_features: Labels_Encoded_train[column] = pd.factorize(Labels_Encoded_train[column])[ 0 ].reshape(-1, 1) Labels_Encoded_test[column] = pd.factorize(Labels_Encoded_test[column])[0].reshape( -1, 1 ) Labels_Encoded_train.info() Labels_Encoded_test.info() # ### Model 1 from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, precision_score, recall_score x = OneHot_Encoded_train.drop("Survived", axis=1) y = OneHot_Encoded_train["Survived"] x_train, x_valid, y_train, y_valid = train_test_split( x, y, test_size=0.2, stratify=y, random_state=42 ) # Model Using different Solvers LogisticRegressionSolvers = ["liblinear", "newton-cg", "lbfgs", "sag", "saga"] for solver in LogisticRegressionSolvers: model = LogisticRegression(solver=solver, max_iter=10000) model.fit(x_train, y_train) y_predict = model.predict(x_valid) print(f'Results of Logistic Regression Model 1 USing "{solver}" Solver') print("Model Accuracy = {}".format(accuracy_score(y_valid, y_predict))) print("Model Precision = {}".format(precision_score(y_valid, y_predict))) print("Model Recall = {}".format(recall_score(y_valid, y_predict))) print("") # ##### Model using `"liblinear", "newton-cg" and "lbfgs" Solvers` are so close. 
# Using GridSearchCV instead of Iteration like before from sklearn.model_selection import GridSearchCV LogisticRegressionSolvers = ["liblinear", "newton-cg", "lbfgs", "sag", "saga"] iteration = [100, 250, 500, 1000, 2000, 5000, 10000] parameters_grid = {"solver": LogisticRegressionSolvers, "max_iter": iteration} GridSearchResult = GridSearchCV( LogisticRegression(), parameters_grid, scoring=["accuracy", "precision", "recall"], refit="accuracy", return_train_score=True, cv=4, ) GridSearchResult.fit(x, y) print(GridSearchResult.best_params_) print(GridSearchResult.best_score_) print(GridSearchResult.best_estimator_) # ### Model 2 # ##### Same as Model 1 except using Labeled Encoded data to see if the different Encoding can change the performance x = Labels_Encoded_train.drop("Survived", axis=1) y = Labels_Encoded_train["Survived"] x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=0.2, stratify=y) # Model Using different Solvers LogisticRegressionSolvers = ["liblinear", "newton-cg", "lbfgs", "sag", "saga"] for solver in LogisticRegressionSolvers: model = LogisticRegression(solver=solver, max_iter=10000) model.fit(x_train, y_train) y_predict = model.predict(x_valid) print(f'Results of Logistic Regression Model 2 USing "{solver}" Solver') print("Model Accuracy = {}".format(accuracy_score(y_valid, y_predict))) print("Model Precision = {}".format(precision_score(y_valid, y_predict))) print("Model Recall = {}".format(recall_score(y_valid, y_predict))) print("") # ##### It seems that `Label Encoding` is almost the same as `OneHot Encoding` in this case. # Using GridSearchCV instead of Iteration like before GridSearchResult.fit(x, y) print(GridSearchResult.best_params_) print(GridSearchResult.best_score_) print(GridSearchResult.best_estimator_) # ### Model 3 # ##### Trying to use valuable features that may enhance the model OneHot_Encoded_train.columns selected_features = [ "Pclass", "Age", "Married_With_Childern", "Family_Members", "Single", "Child", "Sex_female", "Sex_male", "Title_Master", "Title_Miss", "Title_Mr", "Title_Mrs", "Title_Others", ] # Selected Datasets selected_train = OneHot_Encoded_train[selected_features + ["Survived"]] selected_test = OneHot_Encoded_test[selected_features] x = selected_train.drop("Survived", axis=1) y = selected_train["Survived"] x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=0.2, stratify=y) # Model Using different Solvers LogisticRegressionSolvers = ["liblinear", "newton-cg", "lbfgs", "sag", "saga"] for solver in LogisticRegressionSolvers: model = LogisticRegression(solver=solver, max_iter=10000) model.fit(x_train, y_train) y_predict = model.predict(x_valid) print(f'Results of Logistic Regression Model 2 USing "{solver}" Solver') print("Model Accuracy = {}".format(accuracy_score(y_valid, y_predict))) print("Model Precision = {}".format(precision_score(y_valid, y_predict))) print("Model Recall = {}".format(recall_score(y_valid, y_predict))) print("") # Using GridSearchCV instead of Iteration like before GridSearchResult.fit(x, y) print(GridSearchResult.best_params_) print(GridSearchResult.best_score_) print(GridSearchResult.best_estimator_) # ### Result # - As shown below, the best model we have got is the `third model`. best_model = LogisticRegression(solver="liblinear", max_iter=100) best_model.fit(x, y) test_predict = best_model.predict(selected_test) # Saving test predictions to file output = pd.DataFrame({"PassengerId": selected_test.index, "Survived": test_predict}) output.to_csv("submission.csv", index=False)
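# An alternative layout, shown only as a hedged sketch rather than the notebook's own
# approach: bundling imputation, encoding and the classifier in one Pipeline keeps train
# and test preprocessed identically and avoids fitting any preprocessing step on the
# validation folds. Column names follow the raw Titanic CSVs loaded above.
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

numeric = ["Age", "Fare", "SibSp", "Parch"]
categorical = ["Pclass", "Sex", "Embarked"]

preprocess = ColumnTransformer(
    [
        ("num", Pipeline([("impute", SimpleImputer(strategy="median")),
                          ("scale", StandardScaler())]), numeric),
        ("cat", Pipeline([("impute", SimpleImputer(strategy="most_frequent")),
                          ("onehot", OneHotEncoder(handle_unknown="ignore"))]), categorical),
    ]
)  # columns not listed above (Name, Ticket, Cabin) are dropped by default

clf = Pipeline([("prep", preprocess),
                ("model", LogisticRegression(solver="liblinear", max_iter=1000))])

scores = cross_val_score(clf, train.drop("Survived", axis=1), train["Survived"], cv=5)
print("cross-validated accuracy:", scores.mean())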
false
0
4,604
1
4,604
4,604
69345693
# Make a copy of this notebook and submit the link in Google Classroom. To submit the link to a Kaggle, click Share > Private > Switch it to Public > copy the link and submit in Classroom under the assignment. # # 1. Printing Text # Please print out your name! # print("Zoey") # # 2. Variables/Math Operators: Counting Cookies! # ## Tasks/Objectives # 1. Create a variable called cookies with value 30. (You have 30 cookies in a jar). # 2. Create a variable called jars with value 5. (You have 5 jars) # 3. Print the following message: "I have 30 cookies!". Make sure to use the variable, cookies, to put the 30, instead of just typing 30. # 4. Use math to calculate the total number of cookies, if you have 5 jars with 30 cookies each! Multiply using variables. # # Remember to use variables throughout the entire task. cookies = 30 jars = 5 print("I have " + str(cookies) + " cookies!") print("The total number of cookies is " + str(cookies * jars)) # # 3. String Manipulation # Each DNA strand has 8 characters comprised of A, G, T, C. # 1. Given the a,t,g, and c variables, try to make a DNA strand by concatenating (adding) variables. Any sequence works, you get to decide what nucleotides go in it. hint: you can do: sequence = a+t. Print sequence and see what you get. # 2. Replace A in the DNA strand with T. # 3. Make the DNA strand lowercase. (You can google how to do this!) # 4. Print out the length of the DNA strand to verify that you have all 8 characters in the format of "The length of the DNA strand is: (length of the DNA strand) (Google if you forgot how to find the LENGTH of a STRING) # # given variables a = "A" g = "g" t = "t" c = "c" # your code here sequence = a + t + c + g + c + t + a + a print(sequence) print(sequence.replace("A", "t")) a = a.lower() DNAlength = len(sequence) print("The length of the DNA strand is " + str(DNAlength) + ".") # # 4. Iteration/Lists # ## Exercise 1: The Counter # Print all numbers from 0-10 using a for loop # ## Exercise 2 # Use the 2 given lists to # 1. Combine the lists # 2. Print the list altogether # 3. Print every individual element of the combined list # 4. Sort the list by alphabetical order # 5. Print the length of the combined list # 6. Print the first element in the combined list # ## Exercise 3: Odometer # When someone is driving on the highway, there is no real speed limit. However, when someone is driving on a residential road, the speed limit is 25. To help our police officers reinforce that, we will be writing a simple program. When ```road_state``` is 0, that means the driver is on the freeway and there is no speed limit. However, while they are on ```road_state = 1```, they must obey the 25 mph speed limit. Please remind them by recognizing when they are on the residential roads and need to be going 25 mph by printing a statement that tells them to do so. # Print all numbers from 0 to 10 using a for loop for number in range(11): print(number) # Exercise 2 # given lists cookies = ["Chocolate Chip", "Raisin", "White Chocolate Chip", "Sugar"] ice_cream = ["Mint Chocolate Chip", "Cookie Dough", "Chocolate Chip"] # your code here combined = cookies + ice_cream print(combined) for item in combined: print(item) combined.sort() print(len(combined)) print(combined[0]) # Exercise 3: Odometer # given variables road_state = 0 # your code here [below] if road_state == 0: print("You are on the freeway and there is no speed limit.") elif road_state == 1: print("You are on a residential road and you must obey the 25 mph speed limit.") # # 5. 
Conditional Statements # Assign 8 to the variable x and 15 to the variable y. # In the same cell, create 2 conditional statements. # Let the first one print "At least one of the conditions is satisfied." if x is greater than 3 or y is less than or equal to 10. # Let the second one print "Neither condition is satisfied." if x is less than or equal to 3 or y is greater than 17. # Change the values assigned to x and y and re-run the cell to verify your code still works. # remove the hashtags to undisable the code x = 2 y = 9 if x > 3 or y <= 10: print("At least one of the conditions is satisfied.") if x <= 3 or y > 17: print("Neither condition is satisfied.")
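# Note: with the two independent if statements above, some values (for example x = 2 and y = 9) print both messages even though they contradict each other. A small optional variant, shown here as a sketch rather than the exercise's required answer, uses if/else so only one message can print.
x = 8
y = 15
if x > 3 or y <= 10:
    print("At least one of the conditions is satisfied.")
else:
    print("Neither condition is satisfied.")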
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/345/69345693.ipynb
null
null
[{"Id": 69345693, "ScriptId": 18916457, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7996771, "CreationDate": "07/29/2021 21:10:34", "VersionNumber": 1.0, "Title": "Introduction to Python Exercises - Helyx Summer Ca", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 134.0, "LinesInsertedFromPrevious": 40.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 94.0, "LinesInsertedFromFork": 40.0, "LinesDeletedFromFork": 4.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 94.0, "TotalVotes": 0}]
null
null
null
null
false
0
1,286
0
1,286
1,286
69345275
# import numpy as np # linear algebra # import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # # Input data files are available in the read-only "../input/" directory # # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory # import os # for dirname, _, filenames in os.walk('/kaggle/input'): # for filename in filenames: # print(os.path.join(dirname, filename)) # # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Importing Libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # # Reading Files train_df = pd.read_csv("../input/titanic/train.csv") train_df.head() train_df = train_df.drop("PassengerId", axis=1) test_df = pd.read_csv("../input/titanic/test.csv") X_test = test_df.drop(columns=["PassengerId"]) # train_df.hist() train_df.info() train_df.head() # train_df.Ticket.value_counts() # # Missing Values train_df.isnull().sum().sort_values(ascending=False).head(20) X_test.isnull().sum().sort_values(ascending=False).head(20) # **Cabin Col** # we will impute the NaN values with "Z" to represent a "No-Cabin" cluster train_df["Cabin"] = train_df["Cabin"].fillna("Z") X_test["Cabin"] = X_test["Cabin"].fillna("Z") # **Age Col** train_df.Age.describe() train_df[["Age"]].plot.box() train_df.Age.median() # from this **statistics** we will impute with the **median** due to the **outliers** train_df["Age"] = train_df["Age"].fillna(value=train_df.Age.median()) X_test["Age"] = X_test["Age"].fillna(value=test_df.Age.median()) # **Embarked** # as this feature represents the port where the passenger boarded the ship, it should have a strong relation with **Fare** (the fare reflects how much was spent on the trip, which depends on the distance to the port) and also with the Pclass column, so we should investigate that. # Let's check which rows have a null Embarked column train_df[train_df["Embarked"].isnull()] # Nulls have **Pclass 1** and **fare $80**.
# Let's plot a graph to **visualize** and try to guess from where they embarked sns.boxplot(x="Embarked", y="Fare", hue="Pclass", data=train_df) # so we will impute with **C** as its **Fare** is around **$80** at **Pclass 1** train_df["Embarked"] = train_df["Embarked"].fillna("C") X_test.isnull().sum().sort_values(ascending=False).head(20) # **Fare Col** # Let's check which rows have a null Fare column X_test[X_test["Fare"].isnull()] # we can replace the missing Fare value by taking the **median** of the fares of those passengers who share **Pclass 3** and **Embarked** from **'S'** median_fare = X_test[(X_test["Pclass"] == 3) & (X_test["Embarked"] == "S")][ "Fare" ].median() print(median_fare) X_test["Fare"] = X_test["Fare"].fillna(median_fare) # # Feature Engineering # we will add a new feature "**Deck**" to represent at which **level** the passenger was on the ship train_df["Deck"] = train_df.Cabin.str[0] X_test["Deck"] = X_test.Cabin.str[0] train_df["Deck"].unique() # Z is for null values # we will add a new feature "**FamSize**" to represent **how big the passenger's family** on board is, including the passenger train_df["FamSize"] = train_df["SibSp"] + train_df["Parch"] + 1 X_test["FamSize"] = X_test["SibSp"] + X_test["Parch"] + 1 # we can also **cluster** the **FamSize** column # Discretize family size train_df.loc[train_df["FamSize"] == 1, "FSCluster"] = "single" train_df.loc[ (train_df["FamSize"] > 1) & (train_df["FamSize"] < 5), "FSCluster" ] = "small" train_df.loc[train_df["FamSize"] > 4, "FSCluster"] = "large" X_test.loc[X_test["FamSize"] == 1, "FSCluster"] = "single" X_test.loc[(X_test["FamSize"] > 1) & (X_test["FamSize"] < 5), "FSCluster"] = "small" X_test.loc[X_test["FamSize"] > 4, "FSCluster"] = "large" print(train_df["FSCluster"].unique()) print(train_df["FSCluster"].value_counts()) # getting the name length, as it may be a sign of **social honor**: as honor increases, a passenger tends to give the full names of parents and grandparents, so a longer name may be a good indicator of the passenger's honor alongside the other features train_df["NameLength"] = train_df["Name"].apply(lambda x: len(x)) X_test["NameLength"] = X_test["Name"].apply(lambda x: len(x)) # What's in the name? # we will try to extract the **titles** for each passenger import re # A function to get the title from a name. def get_title(name): # Use a regular expression to search for a title. Titles always consist of capital and lowercase letters, and end with a period. title_search = re.search(" ([A-Za-z]+)\.", name) # If the title exists, extract and return it. if title_search: return title_search.group(1) return "" # Get all the titles and print how often each one occurs.
train_df["Title"] = train_df["Name"].apply(get_title) # print(pd.value_counts(train_df["Title"])) X_test["Title"] = X_test["Name"].apply(get_title) # print(pd.value_counts(test_df["Title"])) # **Dropping** the Unwanted Cols train_df = train_df.drop(["Ticket", "Name", "Cabin"], axis=1) X_test = X_test.drop(["Ticket", "Name", "Cabin"], axis=1) # Checking for **Normalization** or **Scaling** train_df.var() from sklearn.preprocessing import StandardScaler std_scale = StandardScaler().fit(train_df[["Age", "Fare"]]) train_df[["Age", "Fare"]] = std_scale.transform(train_df[["Age", "Fare"]]) std_scale = StandardScaler().fit(X_test[["Age", "Fare"]]) X_test[["Age", "Fare"]] = std_scale.transform(X_test[["Age", "Fare"]]) train_df.var() train_df.Age.plot.kde() train_df.Fare.plot.kde() # # Encoding train_df.head() from sklearn.preprocessing import LabelEncoder, OneHotEncoder labelEnc = LabelEncoder() cat_vars = ["Embarked", "Sex", "Title", "Deck", "FSCluster", "Title"] for col in cat_vars: train_df[col] = labelEnc.fit_transform(train_df[col]) X_test[col] = labelEnc.fit_transform(X_test[col]) train_df.head() # # Visualize sns.pairplot(train_df) # Showing the **Correlations** train_df.corr()["Survived"] print(train_df.shape) print(test_df.shape) # # Splitting Data # Create a data with all columns except Survived X_train = train_df.drop("Survived", axis=1) # Create a Survived labels dataset y_train = train_df[["Survived"]] # importing sklearn from sklearn.model_selection import train_test_split # split up the dataset volunteer_subset dataset X_train, X_valid, y_train, y_valid = train_test_split( X_train, y_train, test_size=0.2, random_state=10 ) # # Logistic Regression # Logistic Regression from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score log_reg = LogisticRegression(solver="liblinear") log_reg.fit(X_train, y_train) # Y_Prediction y_pred = log_reg.predict(X_valid) score = accuracy_score(y_valid, Y_pred) print("Accuracy score : ", score) # # Preparing Submission File # Prediction for Dest Data y_test_predicted = log_reg.predict(X_test) test_df["Survived"] = y_test_predicted test_df.head() test_df[["PassengerId", "Survived"]].to_csv( "/kaggle/working/gender_submission.csv", index=False )
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/345/69345275.ipynb
null
null
[{"Id": 69345275, "ScriptId": 18928749, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7210666, "CreationDate": "07/29/2021 20:59:19", "VersionNumber": 2.0, "Title": "Titanic", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 243.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 242.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,476
0
2,476
2,476
69345475
# # Mansoura Group1-05 # * **Ahmed Nabil Ibrahim Awaad** # * **khaled osama mosaad** # * **Mohammed Ayman Mohammed Samaha** # ## Import the libraries # We'll use `pandas` to load and manipulate the data. Other libraries will be imported in the relevant sections. import pandas as pd import os import xml.etree.ElementTree as ET import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier pd.set_option("display.max_columns", None) # data path dataset_path = "/kaggle/input/car-crashes-severity-prediction/" # ## Exploratory Data Analysis # ### Reading Holidays XML file # Reading Holidays # create a function to convert holidays xml file to dataframe def xml_to_df(file_path): tree = ET.parse(file_path) root = tree.getroot() columns = ["date", "holiday"] holidays_df = pd.DataFrame(columns=columns) for node in root: date = node.find("date").text holiday = 1 holidays_df = holidays_df.append( pd.Series([date, holiday], index=columns), ignore_index=True ) holidays_df["date"] = pd.to_datetime(holidays_df["date"]) return holidays_df holidays_df = xml_to_df(os.path.join(dataset_path, "holidays.xml")) holidays_df.head() holidays_df.info() # ### Reading Weather CSV file # Read Weather file weather_df = pd.read_csv(os.path.join(dataset_path, "weather-sfcsv.csv")) weather_df.head() # collect the date in one column weather_df["date"] = ( weather_df["Year"].astype(str) + "-" + weather_df["Month"].astype(str) + "-" + weather_df["Day"].astype(str) + " " + weather_df["Hour"].astype(str) + ":00:00" ) weather_df["date"] = pd.to_datetime(weather_df["date"]) # dropping duplicate weather info at the same date weather_df.drop_duplicates(subset="date", inplace=True) # encode # use this encoder to encode le = LabelEncoder() weather_df.Weather_Condition = le.fit_transform( weather_df.Weather_Condition.astype("|S").values ) weather_df.Selected = le.fit_transform(weather_df.Selected.astype("|S").values) weather_df.head(3) weather_df.info() # ### Reading training data CSV file df = pd.read_csv(os.path.join(dataset_path, "train.csv")) print("The shape of the dataset is {}.\n\n".format(df.shape)) df.head() df.info() # define a function to prepare date as it apear in other data sets to use it in merging def Prepare_date(dataframe): dataframe["timestamp"] = pd.to_datetime(dataframe["timestamp"]) dataframe["date_to_nearest_day"] = pd.to_datetime(dataframe["timestamp"].dt.date) dataframe["hours"] = dataframe["timestamp"].dt.hour dataframe["time_to_nearest_hour"] = dataframe["timestamp"].dt.floor("h") dataframe["dayofweek"] = dataframe.timestamp.dt.dayofweek return dataframe df = Prepare_date(df) df.info() # Merge weather dataframe def merge_weather(dataframe, weather_dataframe): dataframe = dataframe.merge( weather_dataframe, left_on="time_to_nearest_hour", right_on="date", how="left" ) return dataframe # Merge holiday dataframe def merge_holidays(dataframe, holidays_dataframe): dataframe = dataframe.merge( holidays_dataframe, left_on="date_to_nearest_day", right_on="date", how="left" ) dataframe["holiday"] = dataframe["holiday"].fillna(0) return dataframe df = merge_weather(df, weather_df) df = merge_holidays(df, holidays_df) # check df.head() df.info() def drop_na(dataframe, col_lst): dataframe.dropna(subset=col_lst, inplace=True) col_lst = ["Wind_Speed(mph)", "Humidity(%)"] drop_na(df, col_lst) # df.fillna(df.mean(), inplace=True) df.info() # describe dataset 
df.drop(columns="ID").describe(include="all", datetime_is_numeric=True) # # encode # def encode_columns(dataframe): # dataframe = pd.get_dummies(dataframe, columns = ['Weather_Condition','Crossing','Give_Way','Junction','No_Exit','Railway','Stop','Amenity','Side']) # return dataframe # encode # use this encoder to encode le = LabelEncoder() def encode_columns(dataframe, le): dataframe.Crossing = le.fit_transform(dataframe.Crossing.values) dataframe.Give_Way = le.fit_transform(dataframe.Give_Way.values) dataframe.Junction = le.fit_transform(dataframe.Junction.values) dataframe.No_Exit = le.fit_transform(dataframe.No_Exit.values) dataframe.Railway = le.fit_transform(dataframe.Railway.values) dataframe.Stop = le.fit_transform(dataframe.Stop.values) dataframe.Amenity = le.fit_transform(dataframe.Amenity.values) dataframe.Side = le.fit_transform(dataframe.Side.values) return dataframe df = encode_columns(df, le) df.head() # get the correlation matrix df.corr() # droping unneeded columns def drop_unneeded_columns(dataframe, c_lst): dataframe = dataframe.drop(columns=c_lst) return dataframe column_lst = [ "date_to_nearest_day", "hours", "time_to_nearest_hour", "Selected", "Bump", "Roundabout", "Wind_Chill(F)", "Precipitation(in)", "date_x", "date_y", ] df = drop_unneeded_columns(df, column_lst) df.corr().Severity.sort_values() plt.subplots(figsize=(15, 10)) plt.plot( (df.corr().Severity.drop("Severity").sort_values()), color="purple", lw=1, ls="--", marker="o", markersize=4, ) plt.xticks(rotation=90) plt.xlabel("Features") plt.ylabel("Correlation With Severity") plt.grid() # # # # Modify the accident class value to combine them in one column def modify_columns(df1): df1.Give_Way.replace(to_replace=1, value=2, inplace=True) df1.Junction.replace(to_replace=1, value=4, inplace=True) df1.No_Exit.replace(to_replace=1, value=8, inplace=True) df1.Railway.replace(to_replace=1, value=16, inplace=True) df1.Stop.replace(to_replace=1, value=16, inplace=True) df1.Amenity.replace(to_replace=1, value=32, inplace=True) df1.Side.replace(to_replace=1, value=64, inplace=True) df1.holiday.replace(to_replace=0, value=2, inplace=True) df1["Accident_class"] = ( df1["Distance(mi)"] + df1["Crossing"] + df1["Give_Way"] + df1["Junction"] + df1["No_Exit"] + df1["Railway"] + df1["Stop"] + df1["Amenity"] + df1["Side"] ).astype(int) modify_columns(df) df.head() # ## Plot features vs. 
Severity df1 = df.copy().drop( columns=[ "Crossing", "Give_Way", "Junction", "No_Exit", "Railway", "Stop", "Amenity", "Side", ] ) df1.head() s1 = df1[df1["Severity"] == 1] s2 = df1[df1["Severity"] == 2] s3 = df1[df1["Severity"] == 3] s4 = df1[df1["Severity"] == 4] def plot_feature_kde(feature): s1[feature].plot.kde() s2[feature].plot.kde() s3[feature].plot.kde() s4[feature].plot.kde() plt.legend(labels=["S1", "S2", "S3", "S4"]) plot_feature_kde("Lat") plot_feature_kde("Lng") plot_feature_kde("Distance(mi)") plot_feature_kde("Temperature(F)") plot_feature_kde("Visibility(mi)") def plot_feature_kde(s, features_lst): plt.subplots(figsize=(20, 10)) for feature in features_lst: s[feature].plot.kde() plt.legend() feature_lst = [ "Lat", "Lng", "Distance(mi)", "dayofweek", "Year", "Day", "Month", "Hour", "Weather_Condition", "Temperature(F)", "Humidity(%)", "Wind_Speed(mph)", "Visibility(mi)", "Accident_class", ] plot_feature_kde(s1, feature_lst) plot_feature_kde(s2, feature_lst) plot_feature_kde(s3, feature_lst) plot_feature_kde(s4, feature_lst) # ## Remove the outliers # Remove the outliers from Temperature Q1 = df["Temperature(F)"].quantile(0.25) Q3 = df["Temperature(F)"].quantile(0.75) IQR = Q3 - Q1 filter_w = (df["Temperature(F)"] >= Q1 - 1.5 * IQR) & ( df["Temperature(F)"] <= Q3 + 1.5 * IQR ) df = df.loc[filter_w] # ## Data Splitting # Now it's time to split the dataset for the training step. Typically the dataset is split into 3 subsets, namely, the training, validation and test sets. In our case, the test set is already predefined. So we'll split the "training" set into training and validation sets with 0.8:0.2 ratio. # *Note: a good way to generate reproducible results is to set the seed to the algorithms that depends on randomization. This is done with the argument `random_state` in the following command* # train_df, val_df = train_test_split(df, test_size=0.2, random_state=42) # Try adding `stratify` here train_df, val_df = train_test_split( df, test_size=0.2, random_state=42, stratify=df["Severity"] ) X_train = train_df.drop(columns=["ID", "Severity"]) y_train = train_df["Severity"] X_val = val_df.drop(columns=["ID", "Severity"]) y_val = val_df["Severity"] X_train.head() allFeatures = [ "Lat", "Lng", "Distance(mi)", "Year", "Month", "Hour", "Temperature(F)", "dayofweek", "Wind_Speed(mph)", "Weather_Condition", "Accident_class", ] X_train = X_train[allFeatures] X_val = X_val[allFeatures] X_train # ## Model Training # Let's train a model with the data! We'll train a Random Forest Classifier to demonstrate the process of making submissions. # Create an instance of the classifier classifier = RandomForestClassifier(max_depth=2, random_state=0) # Train the classifierAccident_class classifier = classifier.fit(X_train, y_train) # Now let's test our classifier on the validation dataset and see the accuracy. print( "The accuracy of the classifier on the validation set is ", (classifier.score(X_val, y_val)), ) importance = classifier.feature_importances_ for i, v in zip(allFeatures, importance): print(f"Feature: {i}, Score: {round(v,5)}") plt.subplots(figsize=(20, 10)) plt.bar(allFeatures, importance) plt.xticks(rotation=90) plt.xlabel("Features") plt.ylabel("Importance") # plt.savefig('plot3.png', dpi=300 ,bbox_inches='tight'); # Well. That's a good start, right? A classifier that predicts all examples' `Severity` as 2 will get around 0.63. You should get better score as you add more features and do better data preprocessing. 
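# The forest above is capped at max_depth=2, so its accuracy may sit close to the majority-class baseline of roughly 0.63 mentioned above. A hedged sketch of a small hyperparameter search follows; the grid values are illustrative assumptions, not tuned for this dataset.
from sklearn.model_selection import GridSearchCV

rf_grid = {"n_estimators": [100, 300], "max_depth": [2, 6, 10, None]}
rf_search = GridSearchCV(
    RandomForestClassifier(random_state=0), rf_grid, cv=3, scoring="accuracy"
)
rf_search.fit(X_train, y_train)
print(rf_search.best_params_, rf_search.best_score_)
print("Validation accuracy:", rf_search.best_estimator_.score(X_val, y_val))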
# ## Submission File Generation # We have built a model and we'd like to submit our predictions on the test set! In order to do that, we'll load the test set, predict the class and save the submission file. # First, we'll load the data. test_df = pd.read_csv(os.path.join(dataset_path, "test.csv")) test_df.head() # prepare test dataset (rows must not be dropped from the test set, so missing values are filled below instead of removed) test_df = Prepare_date(test_df) test_df = merge_weather(test_df, weather_df) test_df = merge_holidays(test_df, holidays_df) test_df = encode_columns(test_df, le) test_df = drop_unneeded_columns(test_df, column_lst) test_df["Wind_Speed(mph)"].fillna(df["Wind_Speed(mph)"].mean(), inplace=True) modify_columns(test_df) test_df.head() test_df.info() X_test = test_df.drop(columns=["ID"]) # You should update/remove the next line once you change the features used for training X_test = X_test[allFeatures] y_test_predicted = classifier.predict(X_test) test_df["Severity"] = y_test_predicted test_df.head() # Now we're ready to generate the submission file. The submission file needs the columns `ID` and `Severity` only. test_df[["ID", "Severity"]].to_csv("/kaggle/working/submission.csv", index=False)
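# A quick sanity check before submitting (an optional sketch): compare the predicted class shares with the training shares, since a very shallow forest can end up predicting the majority class almost everywhere.
print(test_df["Severity"].value_counts(normalize=True))
print(train_df["Severity"].value_counts(normalize=True))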
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/345/69345475.ipynb
null
null
[{"Id": 69345475, "ScriptId": 18881861, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6206578, "CreationDate": "07/29/2021 21:04:37", "VersionNumber": 4.0, "Title": "car-crashes-severity", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 341.0, "LinesInsertedFromPrevious": 11.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 330.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
false
0
3,637
2
3,637
3,637
69345541
<jupyter_start><jupyter_text>All Lending Club loan data # Context Update: I probably won't be able to update the data anymore, as LendingClub now has a scary 'TOS' popup when downloading the data. Worst case, they will ask me/Kaggle to take it down from here. This dataset contains the full LendingClub data available from [their site][1]. There are separate files for accepted and rejected loans. The accepted loans also include the FICO scores, which can only be downloaded when you are signed in to LendingClub and download the data. See the Python and R getting started kernels to get started: - R: https://www.kaggle.com/wordsforthewise/eda-in-r-arggghh - Python: https://www.kaggle.com/wordsforthewise/eda-with-python I created a git repo for the code which is used to create this data: https://github.com/nateGeorge/preprocess_lending_club_data # Background I wanted an easy way to share all the lending club data with others. Unfortunately, the [data on their site][1] is fragmented into many smaller files. There is another lending club [dataset on Kaggle][2], but it wasn't updated in years. It seems like the "Kaggle Team" is updating it now. I think it also doesn't include the full rejected loans, which are included here. It seems like the [other dataset][3] confusingly has some of the rejected loans mixed into the accepted ones. Now there are a ton of other [LendingClub datasets on here too][4], most of which seem to have no documentation or explanation of what the data actually is. # Content The definitions for the fields are on the [LendingClub site][5], at the bottom of the page. Kaggle won't let me upload the .xlsx file for some reason since it seems to be in multiple other data repos. This file seems to be in the [other main repo][6], but again, it's better to get it directly from the [source][5]. Unfortunately, there is (maybe "was" now?) a limit of 500MB for dataset files, so I had to compress the files with gzip in the Python pandas package. I cleaned the data a tiny bit: I removed percent symbols (%) from `int_rate` and `revol_util` columns in the accepted loans and converted those columns to floats. # Update The URL column is in the dataset for completeness, as of 2018 Q2. 
[1]: https://www.lendingclub.com/info/download-data.action [2]: https://www.kaggle.com/wendykan/lending-club-loan-data [3]: https://www.kaggle.com/wendykan/lending-club-loan-data [4]: https://www.kaggle.com/datasets?sortBy=relevance&group=public&search=lending%20club&page=1&pageSize=20&size=all&filetype=all&license=all [5]: https://www.lendingclub.com/info/download-data.action [6]: https://www.kaggle.com/wendykan/lending-club-loan-data Kaggle dataset identifier: lending-club <jupyter_script># unzip .csv files import gzip # data analysis import pandas as pd import numpy as np # visualization import seaborn as sns import matplotlib.pyplot as plt from scipy.special import expit # scaling and train test split from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler # machine learning models from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import BaggingClassifier from sklearn.svm import SVC import tensorflow as tf # model evaluation from sklearn import metrics from sklearn.model_selection import cross_validate # pandas and pyplot settings pd.set_option("display.max_columns", None) pd.set_option("display.max_rows", 200) with gzip.open( "../input/lending-club/accepted_2007_to_2018Q4.csv.gz", "r" ) as a, gzip.open("../input/lending-club/rejected_2007_to_2018Q4.csv.gz", "r") as r: accepted_df = pd.read_csv(a) rejected_df = pd.read_csv(r) # inspect dataframes accepted_df.head(1) rejected_df.head(1) # only keep columns that are relevant, see explanation on table below # also, because the rejected dataset doesn't provide info on defaults, we will not further use it. df = accepted_df[ [ "loan_amnt", "term", "int_rate", "installment", "grade", "sub_grade", "emp_length", "home_ownership", "annual_inc", "verification_status", "loan_status", "purpose", "addr_state", "dti", "open_acc", "pub_rec", "revol_bal", "revol_util", "total_acc", "initial_list_status", "application_type", "mort_acc", "pub_rec_bankruptcies", "disbursement_method", ] ] # Thats a lot of columns to unpack. Here are some informations in found either online or in the [Notebook of Thomas Mantero](https://www.kaggle.com/tomasmantero/minimizing-risks-for-loan-investments-keras-ann). Every column that has something to do with a running credit (last payment etc.) is deleted. Some other rows are deleted for obvious reasons. For the rest of the deleted columns, the table explains them and why they are deleted. # # # Deleted Column # Description # # # # # id, member_id # New transactions and members will not have a history of previous loans, so they cannot provide information for new loans. # # funded_amnt, funded_amnt_inv # Mutual information with loan_amnt # # # emp_title # The job title supplied by the Borrower when applying for the loan. It is likely to have a correlation with the default rate, but there are more than 500.000 unique entries, so the column will be dropped. # # # issue_d # The month which the loan was funded. If we assume that the issue date has an influence on the default rate, loans from January 2016 would be either more or less likely to default than in October 2017, which I don't think is likely. # # # issue_d # The month which the loan was funded. If we assume that the issue date has an influence on the default rate, loans from January 2016 would be either more or less likely to default than in October 2017, which I don't think is likely. 
# # # zip_code # The first 3 numbers of the zip code provided by the borrower in the loan application. Could hold usefull information, but i did not want to create 900 dummy variables for now. # # # earliest_cr_line # The month the borrower's earliest reported credit line was opened. Same argumentation as with issue_d, and 700 dummy variables would be needed. # # # title # The loan title provided by the borrower, which oculd provide useful information but mostly shares information with 'purpose' # # # hardship_flag # Tells if loan taker was in hardship. Sadly, there is only one unique value in this column. # # # debt_settlement_flag # Tells if the loan taker has ever had a type of debt relief. All 'yes' flags have are loan defaults, so I think this variable is added AFTER the default, so it cannot be used for analysis. # # # # # Column kept in dataframe # Description # # # # # loan_amnt # The listed amount of the loan applied for by the borrower. If at some point in time, the credit department reduces the loan amount, then it will be reflected in this value. # # # term # The number of payments on the loan. Values are in months and can be either 36 or 60. # # # int_rate # Interest Rate on the loan # # # installment # The monthly payment owed by the borrower if the loan originates. # # # grade # LC assigned loan grade # # # sub_grade # LC assigned loan subgrade # # # emp_length # Employment length in years. Possible values are between 0 and 10 where 0 means less than one year and 10 means ten or more years. # # # home_ownership # The home ownership status provided by the borrower during registration or obtained from the credit report. Our values are: RENT, OWN, MORTGAGE, OTHER # # # annual_inc # The self-reported annual income provided by the borrower during registration. # # # verification_status # Indicates if income was verified by LC, not verified, or if the income source was verified # # # loan_status # Current status of the loan # # # pymnt_plan # The payment plan of the loan. Can take values of ... # # # purpose # A category provided by the borrower for the loan request. # # # addr_state # The state provided by the borrower in the loan application # # # dti # A ratio calculated using the borrower’s total monthly debt payments on the total debt obligations, excluding mortgage and the requested LC loan, divided by the borrower’s self-reported monthly income. # # # delinq_2yrs # The number of times the borrower had been 30+ days past due on a payment in the past 2 years. # # # fico_range_low, fico_range_high # FICO scores predict the default likeliness of a person based on credit reports. # # # open_acc # The number of open credit lines in the borrower's credit file. # # # pub_rec # Number of derogatory public records # # # revol_bal # Total credit revolving balance # # # revol_util # Revolving line utilization rate, or the amount of credit the borrower is using relative to all available revolving credit. # # # total_acc # The total number of credit lines currently in the borrower's credit file # # # initial_list_status # The initial listing status of the loan. Possible values are – W, F # # # application_type # Indicates whether the loan is an individual application or a joint application with two co-borrowers # # # mort_acc # Number of mortgage accounts. 
# # # pub_rec_bankruptcies # Number of public record bankruptcies # # # disbursement_method # The type of disbursement for the loan # # # #### Check the values our target column can have # We want to separate the observations where a loan is in default, and where the loan was fully paid. # There are only 40 observations for default, which means that the data set is extremely imbalanced. To be able to adequately train a model, we need more observations. A "charged off" loan is basically a loan which is expected to default in the future. We will assume that "charged off" loans will default in the future. # Then there are values for loans which are currently ongoing. Because we are unable to tell whether these observations will default or not, they are dropped from the dataframe. # Furthermore there are observations where the loan status informs that the loan does not meet the credit policy. As I honestly do not know what that exactly means, these values will also be dropped. df.loan_status.value_counts() df = df[df.loan_status.isin(["Fully Paid", "Charged Off", "Default"])] df["default"] = df.apply( lambda x: 1 if x.loan_status in ["Default", "Charged Off"] else 0, axis=1 ) df.default.value_counts() df.describe().transpose() # #### Correlation matrix of all features # There are three strong correlations between features in this data set: # * The loan amount and installment, which makes sense since the installment is computed from the loan amount, term and interest rate # * total_acc and open_acc # * The public record and the public record for bankruptcies. The general public record probably also holds entries for bankruptcies sns.set(style="whitegrid", font_scale=1) plt.figure(figsize=(12, 12)) plt.title("Pearson Correlation Matrix", fontsize=25) sns.heatmap( df.corr(), linewidths=0.25, vmax=0.7, square=True, cmap="RdPu", linecolor="w", annot=True, annot_kws={"size": 10}, cbar_kws={"shrink": 0.7}, ) # #### Depicting the relation of the columns to the default column # We plot the relation of all variables to the default column. The showfliers parameter had to be added to the boxplot method, otherwise the income figure was not readable. # * Interestingly, if the income of a person is verified, the person's default rate is higher. # * The public record boxplots are indistinguishable from a line on y=0. This is because outliers have been removed, and having a public record seems to be a rare outlier. Because of the correlation to default shown in the correlation matrix, the columns for public record are not removed. for column in df.columns: if df[column].dtype == "float64": fig, ax = plt.subplots(figsize=(12, 12)) sns.boxplot(ax=ax, x="default", y=column, data=df, showfliers=False) elif (df[column].dtype == "object") and (len(df[column].unique()) < 52): fig, ax = plt.subplots(figsize=(12, 12)) data = ( df.groupby(column)["default"] .value_counts(normalize=True) .mul(100) .rename("percent") .reset_index() ) sns.barplot(data=data, x=column, y="percent", hue="default", ax=ax) # ## Preprocessing # ### One-hot encoding categorical variables # We need to create dummy variables for the categorical variables. # df.describe(include="all").loc["unique", :] df = pd.get_dummies(df) # ### Handling NaN values for column in df.columns: if df[column].isnull().sum() > 0: df[column] = df[column].fillna(df[column].mean()) # # Training machine learning models on the data # ## Logistic regression (in-sample) # Creating X and y sets without validation sets X = df.drop("default", axis=1) y = df.default # Creating and fitting a logistic regression model with penalty set to none.
The scikit-learn default would otherwise apply an l2 (ridge-style) penalty. lr_model = LogisticRegression(penalty="none", random_state=0) lr_model.fit(X, y) # Creating and evaluating in-sample predictions of the logistic regression model. test_df = df.sample(n=100000, random_state=0) X_test = test_df.drop("default", axis=1) y_test = test_df.default predictions = lr_model.predict(X_test) mae = metrics.mean_absolute_error(y_test, predictions) mse = metrics.mean_squared_error(y_test, predictions) r2 = metrics.r2_score(y_test, predictions) print( """ MAE: {:.3f} MSE: {:.3f} R^2: {:.3f} """.format( mae, mse, r2 ) ) # ### Interpreting the model # * We can clearly observe from the figures that a worse grade should lead to a higher parameter for the variable. This is only partly the case for this logistic regression model. # * The employment time 10+ years has the lowest parameter of all employment time variables, which corresponds to what we can see in the figures. # * The 60 month term variable has a much higher parameter than the 36 month variable # In general it can be said that the parameters are similar to the correlations we can observe in the figures, yet the model performs very poorly. The R^2 is negative, which means that the model performs worse than just taking the average value of the default column as prediction. The intercept of the model is slightly negative (-0.00139), which means that when all variables are zero, the predicted log-odds of default is just below zero, i.e. the predicted default probability is slightly under 50%. interpret_df = pd.DataFrame( [X.columns, lr_model.coef_[0]], index=["Variable", "Parameter"] ).transpose() print(lr_model.intercept_) interpret_df # ## Logistic regression (out-of-sample) # First we need to create train and test splits for our dataset. A small test_size parameter is given, because our dataset has a sufficient number of observations. X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0) # define as function to reuse for different models; it takes an estimator instance directly, matching how it is called below def evaluate_model( model, X, y, X_train, X_test, y_train, y_test ): # I didn't want to use the X & y global variables, that's why I'm passing them to the function # method one: train on the train set, predict on the test set model.fit(X_train, y_train) predictions = model.predict(X_test) accuracy = metrics.accuracy_score(y_test, predictions) mae = metrics.mean_absolute_error(y_test, predictions) mse = metrics.mean_squared_error(y_test, predictions) r2 = metrics.r2_score(y_test, predictions) print( """Simple Accuracy: {:.3f} MAE: {:.3f} MSE: {:.3f} R^2: {:.3f} """.format( accuracy, mae, mse, r2 ) ) # method 2: use cross_validate() to fit and predict multiple times. Use the mean of these results as performance measures scores = cross_validate( model, X=X_test, y=y_test, cv=5, scoring=("accuracy", "neg_mean_absolute_error", "neg_mean_squared_error", "r2"), return_train_score=True, ) print( """Cross-Validation Accuracy: {:.3f} MAE: {:.3f} MSE: {:.3f} R^2: {:.3f} """.format( scores["test_accuracy"].mean(), scores["test_neg_mean_absolute_error"].mean(), scores["test_neg_mean_squared_error"].mean(), scores["test_r2"].mean(), ) ) evaluate_model( LogisticRegression(penalty="none", random_state=0), X, y, X_train, X_test, y_train, y_test, ) # ## Classification Tree evaluate_model( DecisionTreeClassifier(random_state=0), X, y, X_train, X_test, y_train, y_test ) # ## Random Forest evaluate_model( RandomForestClassifier(random_state=0), X, y, X_train, X_test, y_train, y_test ) # ## Bagging evaluate_model( BaggingClassifier(random_state=0), X, y, X_train, X_test, y_train, y_test ) # ## Support Vector Machine evaluate_model(SVC(random_state=0), X, y, X_train, X_test, y_train, y_test)
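# A short, hedged aside (not part of the original notebook): since this is an imbalanced binary
# classification task, classification-oriented metrics such as ROC AUC and a per-class
# precision/recall report are often more informative than MAE/MSE/R^2. A minimal sketch follows,
# reusing X_train, X_test, y_train, y_test from above; everything else here is illustrative only.
from sklearn.linear_model import LogisticRegression
from sklearn import metrics

# class_weight="balanced" reweights the minority (default) class during fitting
clf = LogisticRegression(max_iter=1000, class_weight="balanced", random_state=0)
clf.fit(X_train, y_train)

# predicted probability of default, so ranking metrics such as ROC AUC can be computed
probs = clf.predict_proba(X_test)[:, 1]
print("ROC AUC: {:.3f}".format(metrics.roc_auc_score(y_test, probs)))
print(metrics.classification_report(y_test, (probs > 0.5).astype(int)))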
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/345/69345541.ipynb
lending-club
wordsforthewise
[{"Id": 69345541, "ScriptId": 18896506, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7206131, "CreationDate": "07/29/2021 21:06:42", "VersionNumber": 3.0, "Title": "Predicting the Default Rate for Peer-to-Peer Loans", "EvaluationDate": "07/29/2021", "IsChange": true, "TotalLines": 421.0, "LinesInsertedFromPrevious": 11.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 410.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92398197, "KernelVersionId": 69345541, "SourceDatasetVersionId": 370089}]
[{"Id": 370089, "DatasetId": 902, "DatasourceVersionId": 384604, "CreatorUserId": 393328, "LicenseName": "CC0: Public Domain", "CreationDate": "04/10/2019 18:03:34", "VersionNumber": 3.0, "Title": "All Lending Club loan data", "Slug": "lending-club", "Subtitle": "2007 through current Lending Club accepted and rejected loan data", "Description": "# Context \n\nUpdate: I probably won't be able to update the data anymore, as LendingClub now has a scary 'TOS' popup when downloading the data. Worst case, they will ask me/Kaggle to take it down from here.\n\nThis dataset contains the full LendingClub data available from [their site][1]. There are separate files for accepted and rejected loans. The accepted loans also include the FICO scores, which can only be downloaded when you are signed in to LendingClub and download the data.\n\nSee the Python and R getting started kernels to get started:\n\n- R: https://www.kaggle.com/wordsforthewise/eda-in-r-arggghh\n- Python: https://www.kaggle.com/wordsforthewise/eda-with-python\n\nI created a git repo for the code which is used to create this data: https://github.com/nateGeorge/preprocess_lending_club_data\n\n# Background\n\nI wanted an easy way to share all the lending club data with others. Unfortunately, the [data on their site][1] is fragmented into many smaller files. There is another lending club [dataset on Kaggle][2], but it wasn't updated in years. It seems like the \"Kaggle Team\" is updating it now. I think it also doesn't include the full rejected loans, which are included here. It seems like the [other dataset][3] confusingly has some of the rejected loans mixed into the accepted ones. Now there are a ton of other [LendingClub datasets on here too][4], most of which seem to have no documentation or explanation of what the data actually is.\n\n\n# Content\n\nThe definitions for the fields are on the [LendingClub site][5], at the bottom of the page. Kaggle won't let me upload the .xlsx file for some reason since it seems to be in multiple other data repos. This file seems to be in the [other main repo][6], but again, it's better to get it directly from the [source][5].\n\nUnfortunately, there is (maybe \"was\" now?) a limit of 500MB for dataset files, so I had to compress the files with gzip in the Python pandas package. \n\nI cleaned the data a tiny bit: I removed percent symbols (%) from `int_rate` and `revol_util` columns in the accepted loans and converted those columns to floats.\n\n# Update\nThe URL column is in the dataset for completeness, as of 2018 Q2.\n\n\n [1]: https://www.lendingclub.com/info/download-data.action\n [2]: https://www.kaggle.com/wendykan/lending-club-loan-data\n [3]: https://www.kaggle.com/wendykan/lending-club-loan-data\n [4]: https://www.kaggle.com/datasets?sortBy=relevance&group=public&search=lending%20club&page=1&pageSize=20&size=all&filetype=all&license=all\n [5]: https://www.lendingclub.com/info/download-data.action\n [6]: https://www.kaggle.com/wendykan/lending-club-loan-data", "VersionNotes": "Add data definitions file, even though it's on LendingClub's site. Also remove older datasets.", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 648053013.0}]
[{"Id": 902, "CreatorUserId": 393328, "OwnerUserId": 393328.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 370089.0, "CurrentDatasourceVersionId": 384604.0, "ForumId": 2758, "Type": 2, "CreationDate": "03/01/2017 22:25:40", "LastActivityDate": "02/06/2018", "TotalViews": 353986, "TotalDownloads": 48692, "TotalVotes": 669, "TotalKernels": 82}]
[{"Id": 393328, "UserName": "wordsforthewise", "DisplayName": "Nathan George", "RegisterDate": "07/30/2015", "PerformanceTier": 1}]
false
0
4,171
0
4,944
4,171
69248122
<jupyter_start><jupyter_text>preB4_h5_0727 Kaggle dataset identifier: preb4-h5-0727 <jupyter_script>from tqdm.notebook import tqdm import matplotlib.pyplot as plt import pandas as pd import numpy as np import shutil import os import keras train = pd.read_csv("../input/plant-pathology-2021-fgvc8/train.csv") print(train.shape) train.head() seed = 32 target_size = (380, 380) batch_size = 8 test_img = "../input/plant-pathology-2021-fgvc8/test_images" submission = pd.read_csv("../input/plant-pathology-2021-fgvc8/sample_submission.csv") submission.head() import cv2 import numpy as np def get_cut_image(image): img = cv2.blur(image, (3, 3)) copy = np.uint8(img) canny = cv2.Canny(copy, 145, 165) box = np.argwhere(canny > 0) y1, x1 = box.min(axis=0) y2, x2 = box.max(axis=0) cut_img = img[y1:y2, x1:x2] cut_img = cv2.resize(cut_img, target_size) cut_img = cut_img.astype("float32") * (1.0) / 255 return np.array(cut_img) from keras.preprocessing.image import ImageDataGenerator pre = ImageDataGenerator( rescale=1.0 / 255, brightness_range=[0.5, 1.5], rotation_range=15, # 45 shear_range=0.2, zoom_range=0.3, # 0.2 featurewise_center=False, # featurewise_std_normalization=False, # horizontal_flip=True, width_shift_range=0.2, height_shift_range=0.2, vertical_flip=True, ) # validation_split= 0.2,) pos = ImageDataGenerator( rescale=1.0 / 255, brightness_range=[0.5, 1.5], rotation_range=15, # 45 shear_range=0.2, zoom_range=0.3, # 0.2 featurewise_center=False, # featurewise_std_normalization=False, # horizontal_flip=True, width_shift_range=0.2, height_shift_range=0.2, vertical_flip=True, # validation_split= 0.2, # preprocessing_function = get_cut_image, ) test_generator_b4 = pos.flow_from_dataframe( submission, directory=test_img, x_col="image", y_col="labels", class_mode="raw", batch_size=batch_size, target_size=target_size, color_mode="rgb", shuffle=False, seed=seed, ) test_generator_X = pre.flow_from_dataframe( submission, directory=test_img, x_col="image", y_col="labels", class_mode="raw", batch_size=batch_size, target_size=target_size, color_mode="rgb", shuffle=False, seed=seed, ) # ## Efficientnetb4 model = keras.models.load_model( "../input/b4-0728-2010/B4(oversample)4Fold_cv_BC_2552.h5", compile=False ) tta_steps = 6 predictions = [] for i in range(tta_steps): preds = model.predict(test_generator_b4) predictions.append(preds) pred_EffB4 = np.mean(predictions, axis=0) # ## Xception model = keras.models.load_model("../input/prexception/5.h5", compile=False) tta_steps = 6 predictions = [] for i in range(tta_steps): preds = model.predict(test_generator_X) predictions.append(preds) pred_Xception = np.mean(predictions, axis=0) predmean = (pred_EffB4 + pred_Xception) / 2.0 np.around(predmean, decimals=3) perdict = predmean > 0.30 n_label = ["complex", "frog_eye_leaf_spot", "healthy", "powdery_mildew", "rust", "scab"] answer = [] for i in range(perdict.shape[0]): temp = [] for j, k in enumerate(n_label): if perdict[i, j]: temp.append(k) answer.append(temp) answer = [" ".join(n) for n in answer] submission["labels"] = np.array(answer) submission submission.to_csv("submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/248/69248122.ipynb
preb4-h5-0727
davidkuo0401
[{"Id": 69248122, "ScriptId": 18847649, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7599563, "CreationDate": "07/28/2021 15:37:23", "VersionNumber": 17.0, "Title": "PP-(oversample)-TTA-ensemble", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 141.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 139.0, "LinesInsertedFromFork": 52.0, "LinesDeletedFromFork": 23.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 89.0, "TotalVotes": 0}]
[{"Id": 92183377, "KernelVersionId": 69248122, "SourceDatasetVersionId": 2467987}]
[{"Id": 2467987, "DatasetId": 1493988, "DatasourceVersionId": 2510448, "CreatorUserId": 7599563, "LicenseName": "Unknown", "CreationDate": "07/27/2021 09:51:12", "VersionNumber": 1.0, "Title": "preB4_h5_0727", "Slug": "preb4-h5-0727", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1493988, "CreatorUserId": 7599563, "OwnerUserId": 7599563.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2467987.0, "CurrentDatasourceVersionId": 2510448.0, "ForumId": 1513704, "Type": 2, "CreationDate": "07/27/2021 09:51:12", "LastActivityDate": "07/27/2021", "TotalViews": 834, "TotalDownloads": 3, "TotalVotes": 0, "TotalKernels": 1}]
[{"Id": 7599563, "UserName": "davidkuo0401", "DisplayName": "David Kuo", "RegisterDate": "06/06/2021", "PerformanceTier": 0}]
false
2
1,225
0
1,261
1,225
69248918
<jupyter_start><jupyter_text>Chocolate Bar 2020 ### Context Chocolate is one of the most popular candies in the world. Each year, residents of the United States collectively eat more than 2.8 billion pounds. However, not all chocolate bars are created equal! This dataset contains expert ratings of over 1,700 individual chocolate bars, along with information on their regional origin, percentage of cocoa, the variety of chocolate bean used, and where the beans were grown. ## Flavors of Cacao Rating System: Rating Scale 4.0 - 5.0 = Outstanding 3.5 - 3.9 = Highly Recommended 3.0 - 3.49 = Recommended 2.0 - 2.9 = Disappointing 1.0 - 1.9 = Unpleasant *Not all the bars in each range are considered equal, so to show variance from bars in the same range I have assigned .25, .50 or .75. Each chocolate is evaluated from a combination of both objective qualities and subjective interpretation. A rating here only represents an experience with one bar from one batch. Batch numbers, vintages, and review dates are included in the database when known. I would recommend people to try all the chocolate on the database regardless of the rating and experience for themselves. The database is narrowly focused on plain dark chocolate to appreciate the flavors of the cacao when made into chocolate. The ratings do not reflect health benefits, social missions, or organic status. The flavor is the most important component of the Flavors of Cacao ratings. Diversity, balance, intensity, and purity of flavors are all considered. A straight forward single note chocolate can rate as high as a complex flavor profile that changes throughout. Genetics, terroir, post-harvest techniques, processing, and storage can all be discussed when considering the flavor component. Texture has a great impact on the overall experience and it is also possible for texture related issues to impact flavor. It is a good way to evaluate the makers' vision, attention to detail, and level of proficiency. Aftermelt is the experience after the chocolate has melted. Higher quality chocolate will linger and be long-lasting and enjoyable. Since the after melt is the last impression you get from the chocolate, it receives equal importance in the overall rating. Overall Opinion is really where the ratings reflect a subjective opinion. Ideally, it is my evaluation of whether or not the components above worked together and opinion on the flavor development, character, and style. 
It is also here where each chocolate can usually be summarized by the most prominent impressions that you would remember about each chocolate Kaggle dataset identifier: chocolate-bar-2020 <jupyter_code>import pandas as pd df = pd.read_csv('chocolate-bar-2020/chocolate.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 2224 entries, 0 to 2223 Data columns (total 21 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 2224 non-null int64 1 ref 2224 non-null int64 2 company 2224 non-null object 3 company_location 2224 non-null object 4 review_date 2224 non-null int64 5 country_of_bean_origin 2224 non-null object 6 specific_bean_origin_or_bar_name 2224 non-null object 7 cocoa_percent 2224 non-null float64 8 rating 2224 non-null float64 9 counts_of_ingredients 2224 non-null int64 10 beans 2224 non-null object 11 cocoa_butter 2224 non-null object 12 vanilla 2224 non-null object 13 lecithin 2224 non-null object 14 salt 2224 non-null object 15 sugar 2224 non-null object 16 sweetener_without_sugar 2224 non-null object 17 first_taste 2224 non-null object 18 second_taste 2147 non-null object 19 third_taste 1604 non-null object 20 fourth_taste 242 non-null object dtypes: float64(2), int64(4), object(15) memory usage: 365.0+ KB <jupyter_text>Examples: { "Unnamed: 0": 0, "ref": 2454, "company": "5150", "company_location": "U.S.A", "review_date": 2019, "country_of_bean_origin": "Madagascar", "specific_bean_origin_or_bar_name": "Bejofo Estate, batch 1", "cocoa_percent": 76, "rating": 3.75, "counts_of_ingredients": 3, "beans": "have_bean", "cocoa_butter": "have_cocoa_butter", "vanilla": "have_not_vanila", "lecithin": "have_not_lecithin", "salt": "have_not_salt", "sugar": "have_sugar", "sweetener_without_sugar": "have_not_sweetener_without_sugar", "first_taste": "cocoa", "second_taste": "blackberry", "third_taste": "full body", "...": "and 1 more columns" } { "Unnamed: 0": 1, "ref": 2458, "company": "5150", "company_location": "U.S.A", "review_date": 2019, "country_of_bean_origin": "Dominican republic", "specific_bean_origin_or_bar_name": "Zorzal, batch 1", "cocoa_percent": 76, "rating": 3.5, "counts_of_ingredients": 3, "beans": "have_bean", "cocoa_butter": "have_cocoa_butter", "vanilla": "have_not_vanila", "lecithin": "have_not_lecithin", "salt": "have_not_salt", "sugar": "have_sugar", "sweetener_without_sugar": "have_not_sweetener_without_sugar", "first_taste": "cocoa", "second_taste": "vegetal", "third_taste": "savory", "...": "and 1 more columns" } { "Unnamed: 0": 2, "ref": 2454, "company": "5150", "company_location": "U.S.A", "review_date": 2019, "country_of_bean_origin": "Tanzania", "specific_bean_origin_or_bar_name": "Kokoa Kamili, batch 1", "cocoa_percent": 76, "rating": 3.25, "counts_of_ingredients": 3, "beans": "have_bean", "cocoa_butter": "have_cocoa_butter", "vanilla": "have_not_vanila", "lecithin": "have_not_lecithin", "salt": "have_not_salt", "sugar": "have_sugar", "sweetener_without_sugar": "have_not_sweetener_without_sugar", "first_taste": "rich cocoa", "second_taste": "fatty", "third_taste": "bready", "...": "and 1 more columns" } { "Unnamed: 0": 3, "ref": 797, "company": "A. 
Morin", "company_location": "France", "review_date": 2012, "country_of_bean_origin": "Peru", "specific_bean_origin_or_bar_name": "Peru", "cocoa_percent": 63, "rating": 3.75, "counts_of_ingredients": 4, "beans": "have_bean", "cocoa_butter": "have_cocoa_butter", "vanilla": "have_not_vanila", "lecithin": "have_lecithin", "salt": "have_not_salt", "sugar": "have_sugar", "sweetener_without_sugar": "have_not_sweetener_without_sugar", "first_taste": "fruity", "second_taste": "melon", "third_taste": "roasty", "...": "and 1 more columns" } <jupyter_script># # __CHOCOLATE BAR RECIPE TREND ANALYSIS (2006-2020)__ # __Here we will try to study chocolate bar ingredient trends, preferences by companies and rating. We will mostly use Numpy, Pandas to compute the results and, Matplotlib & Seaborn for plotting graphs.The dataset used in this project is taken from [kaggle.com](https://www.kaggle.com/soroushghaderi/chocolate-bar-2020?select=chocolate.csv). The dataset has many different information about chocolate bar companies such as 'company', 'company_location', 'country_of_bean_origin', 'review_date', chocolate 'rating', 'cocoa_percent', common ingredients and tastes information.__ # Let's look into our dataset. # To read the csv file, we will use ```pd.read_csv()``` function, where we will pass path to our csv file which we would like to use for this project. # Let's call it ```chocolate_raw_df``` as this is just raw or unprocessed dataset now, on which further modifications will be done for it to be prepared for data analysis. import pandas as pd chocolate_raw_df = pd.read_csv("../input/chocolate-bar-2020/chocolate.csv") chocolate_raw_df # Looks like there are 21 columns. Let's see all the columns using ```chocolate_raw_df.columns``` chocolate_raw_df.columns # ## __Data Preparation and Cleaning__ # In this section, we select relevant data, explore various details such as shape, unique values, information about columns, its values, missing values, count the same, memory usage, sample the same etc. and make any appropriate changes if needed. # Let's select a subset of columns with the relevant data for our analysis. selected_columns = [ # Company and respective ratings "company", "company_location", "country_of_bean_origin", "review_date", "rating", # Ingredients "cocoa_percent", "counts_of_ingredients", "cocoa_butter", "vanilla", "lecithin", "salt", "sugar", # Tastes "first_taste", "second_taste", "third_taste", "fourth_taste", ] # lets check how many columns we have selected len(selected_columns) # We will be using copy() function to NOT modify original data frame # and to actually create a separate one derived from original chocolate_df = chocolate_raw_df[selected_columns].copy() chocolate_df # Lets use ```pandas.DataFrame.shape``` here, which return's a tuple representing the dimensionality of the DataFrame. chocolate_df.shape # Now, looking into values in columns such as _cocoa_butter, vanilla, lecithin, salt and, sugar_ we can see some kind of similar variation in data of each column. Let's have a look at one of these: chocolate_df.lecithin # Let's check unique values in this particular column chocolate_df.lecithin.unique() # We can actually deal with these values and manually adjust the data type for each column on a case-by-case basis. # To make our further analysis easier, the best way is to change the values into boolean ```True``` and ```False```, where if _'not'_ is present in the string, it will be taken as _'False'_ to show 'absence' of an item and _'True'_ otherwise. 
# To carry these, we will use the functions below in our custom function _'change_to_boolean'_: # * ```pandas.DataFrame.apply``` # > __Format:__ ```DataFrame.apply(func, axis=0, raw=False, result_type=None, args=(), **kwds)``` # > Apply a function along an axis of the DataFrame. # * and ```lambda``` which represents an anonymous function, where if it is used with previous ```pd.Series.apply```, each element of the series is fed into this lambda function. Here we use this for our _if-else_ condition mentioned above. # > The result will be another ```pd.Series``` with each element run through that ```lambda```. # To check the output later and also to verify with original data, we will use # ```pandas.Series.value_counts``` # __Format:__ ```Series.value_counts(normalize=False, sort=True, ascending=False, bins=None, dropna=True)``` # Returns a Series containing counts of unique values. The resulting object will be in descending order so the first element is the most frequently-occurring element. Excludes NA values by default. def change_to_boolean(col_series): return col_series.apply(lambda x: False if "not" in x else True) # old values (to verify) chocolate_df.cocoa_butter.value_counts() chocolate_df["cocoa_butter"] = change_to_boolean(chocolate_df["cocoa_butter"]) chocolate_df.cocoa_butter.value_counts() # old values (to verify) chocolate_df.vanilla.value_counts() chocolate_df["vanilla"] = change_to_boolean(chocolate_df["vanilla"]) chocolate_df.vanilla.value_counts() # old values (to verify) chocolate_df.lecithin.value_counts() chocolate_df["lecithin"] = change_to_boolean(chocolate_df["lecithin"]) chocolate_df["lecithin"].value_counts() # old values (to verify) chocolate_df.salt.value_counts() chocolate_df["salt"] = change_to_boolean(chocolate_df["salt"]) chocolate_df.salt.value_counts() # old values (to verify) chocolate_df.sugar.value_counts() chocolate_df["sugar"] = change_to_boolean(chocolate_df["sugar"]) chocolate_df.sugar.value_counts() chocolate_df # Let's now use ```pandas.DataFrame.info``` to print a concise summary of our DataFrame. # > __Format:__```DataFrame.info(verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None)``` # > This method prints information about a DataFrame including the index dtype and columns, non-null values and memory usage. chocolate_df.info() # We can also check missing values using ```pandas.DataFrame.isna``` # > __Format:__ ```DataFrame.isna()``` # > Returns dataFrame: Mask of bool values for each element in DataFrame that indicates whether an element is not an NA value. # and hence, adding all to get _total sum_ of missing values in each column. chocolate_df.isna().sum() # Let's now see all the columns again chocolate_df.columns # We can also use ```pandas.DataFrame.describe``` to generate descriptive statistics. # > __Format:__ ```DataFrame.describe(percentiles=None, include=None, exclude=None, datetime_is_numeric=False)[source]``` # _Descriptive statistics_ include those that summarize the central tendency, dispersion and shape of a dataset’s distribution, excluding ```NaN``` values. Analyzes both numeric and object series, as well as ```DataFrame``` column sets of mixed data types. chocolate_df.describe() # This was cool. Let's see what are the companies included in our data. chocolate_df.company.value_counts() # We can see company 'Soma' has many variety of chocolate bars in our dataset. 
Let's check in details: soma_df = chocolate_df[chocolate_df.company == "Soma"] soma_df # Well, we've now cleaned up and prepared the dataset all ready for analysis. # Let's take a look at sample of rows from the data frame. chocolate_df.sample(10) # ## __Exploratory Analysis and Visualization__ # In this section, we compute mean, percentage etc. sum etc. We also sort values, explore some more kinds of plot graphs, draw venn diagram and learn about correlation to know interdependence between two or more column variables. We also look into other useful functions such as size and head. # center all output images using HTML from IPython.core.display import HTML HTML( """ <style> .output_png { display: table-cell; text-align: center; vertical-align: middle; } </style> """ ) # Let's begin our analysis and visualization journey by importing ```matplotlib.pyplot``` and ```seaborn``` first. import seaborn as sns import matplotlib import matplotlib.pyplot as plt sns.set_style("darkgrid") matplotlib.rcParams["font.size"] = 14 matplotlib.rcParams["figure.figsize"] = (12, 6) matplotlib.rcParams["figure.facecolor"] = "#00000000" matplotlib.rcParams["axes.labelsize"] = 14 matplotlib.rcParams["axes.titlesize"] = 18 matplotlib.rcParams["xtick.labelsize"] = 14 matplotlib.rcParams["ytick.labelsize"] = 14 # ### 1. Company And Ingredients # Let's look into how common an ingredient is among companies. # Total companies chocolate_df.company.nunique() # We create ```ingredients_df``` to view the present data in consideration. Here we will be using functions such as: # * ```pandas.DataFrame.mean``` # > __Format:__ ```DataFrame.mean(axis=None, skipna=None, level=None, numeric_only=None, **kwargs)``` # > Return the mean of the values for the requested axis as Series or DataFrame (if level specified). # * ```pandas.DataFrame.sort_values``` # > __Format:__ ```DataFrame.sort_values(by, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last', ignore_index=False, key=None)``` # > Sort by the values along either axis. # * We will use horizontal bars to visualize our data. Rotating to a horizontal bar chart from traditional vertical one, is one way to give some variance to a report. This kind of chart also allow for extra long bar titles.To draw a set of horizontal bars here, we will use```seaborn.barplot``` # > __Format:__ ```ax = sns.barplot(x, y)``` # Labelling of the axis is achieved using the Matplotlib syntax on the “```plt```” object imported from ```pyplot```. The key functions used here are: # * “```xlabel```” to add an x-axis label # * “```ylabel```” to add a y-axis label # * “```title```” to add a plot title ingredients_df = chocolate_df[ ["cocoa_butter", "vanilla", "lecithin", "salt", "sugar"] ].copy() ingredients_df # Let's check type of any one column value, which was modified earlier # using our custom function change_to_boolean type(ingredients_df.cocoa_butter[0]) # Percentage of companies preferring an ingredient ingredients_percentage = ingredients_df.mean().sort_values(ascending=False) * 100 ingredients_percentage plt.figure(figsize=(12, 6)) sns.barplot(ingredients_percentage, ingredients_percentage.index, palette="Paired_r") plt.title("Common Ingredients Preference") plt.xlabel("percentage of companies") # **Summary:** # Sugar is the most common ingredient, followed by cocoa butter, lecithin and vanilla. Salt as least preferred by companies. # *** # ### 2. Tastes # In the dataset, in column description, we noticed that there is data of first taste, second, third and fourth taste. 
Lets look into all the tastes, all the common tastes which is switched between these four, most common tastes in each of these four categories and finally draw a venn diagram to get a better view. # Hence, here we can learn all the different tastes present, tastes preferred as first, second and third. Since, fourth taste is rarely there, lets ignore this column for now. # Some functions we explore in this section are: # * ```pandas.Series.ravel``` # > __Format:__ ```Series.ravel(order='C')``` # > Returns the flattened underlying data as an numpy.ndarray or ndarray-like # * ```pandas.unique``` # > __Format:__ ```pandas.unique(values)``` # > Uniques are returned in order of appearance, though this does NOT sort. # > Significantly faster than numpy.unique. Includes NA values. # * ```pandas.DataFrame.count``` # > __Format:__ ```DataFrame.count(axis=0, level=None, numeric_only=False)``` # > Counts non-NA cells for each column or row. The values None, NaN, NaT, and optionally numpy.inf (depending on pandas.options.mode.use_inf_as_na) are considered NA. # * ```pandas.DataFrame.size``` # > __Format:__ ```property DataFrame.size``` # > Return an int representing the number of elements in this object. Returns the number of rows if Series. Otherwise returns the number of rows times number of columns if DataFrame. # * ```pandas.DataFrame.head``` # > __Format:__ ```DataFrame.head(n=5)``` # > This function returns the first n rows for the object based on position. It is useful for quickly testing if an object has the right type of data in it. # > For negative values of n, this function returns all rows except the last n rows, equivalent to df[:-n]. # * Functions provided by ```matplotlib-venn``` for plotting area-proportional two- and three-way _Venn diagrams_ in matplotlib. # > The functions ```venn2_circles``` and ```venn3_circles``` draw just the circles, whereas the functions ```venn2``` and ```venn3``` draw the diagrams as a collection of colored patches, annotated with text labels. To install: # > ```pip install matplotlib-venn``` # # various tastes column_values = chocolate_df[ ["first_taste", "second_taste", "third_taste"] ].values.ravel() unique_values = pd.unique(column_values) # type(unique_values) unique_values.size # first_taste preference among companies as percentage first_taste = ( chocolate_df.first_taste.value_counts() * 100 / chocolate_df.first_taste.count() ) first_taste.head(10) # second_taste preference among companies as percentage second_taste = ( chocolate_df.second_taste.value_counts() * 100 / chocolate_df.second_taste.count() ) second_taste.head(10) # third_taste preference among companies as percentage third_taste = ( chocolate_df.third_taste.value_counts() * 100 / chocolate_df.third_taste.count() ) third_taste.head(10) # Since having three tastes is pretty common, we consider ```first_taste```, ```second_taste``` and ```third_taste``` data from our ```chocolate_df``` dataframe. As this is a _three-circle_ case, we will be using ```venn3``` function. # pip install matplotlib-venn from matplotlib_venn import venn2, venn2_circles from matplotlib_venn import venn3, venn3_circles first_taste = set(chocolate_df["first_taste"]) second_taste = set(chocolate_df["second_taste"]) third_taste = set(chocolate_df["third_taste"]) plt.figure(figsize=(12, 6)) venn3( [first_taste, second_taste, third_taste], ("First Taste", "Second Taste", "Third Taste"), ) plt.title("Number. 
of Unique and Common Tastes") plt.show() # Next, we use ```list(set(df1.A) & set(df2.A) & set(df3.A))``` to find total common tastes. # common tastes a = list(first_taste & second_taste & third_taste) len(a) # *** # ### 3. Percentage of Cocoa and Variation Over Years # Let's use ```seaborn.lineplot``` this time to draw a line plot with possibility of several semantic groupings. # > __Format:__ ```seaborn.lineplot(*, x=None, y=None, hue=None, size=None, style=None, data=None, palette=None, hue_order=None, hue_norm=None, sizes=None, size_order=None, size_norm=None, dashes=True, markers=None, style_order=None, units=None, estimator='mean', ci=95, n_boot=1000, seed=None, sort=True, err_style='band', err_kws=None, legend='auto', ax=None, **kwargs)``` # > By default, the plot aggregates over multiple y values at each value of x and shows an estimate of the central tendency and a confidence interval for that estimate.Passing the entire dataset in long-form mode will aggregate over repeated values (each year) to show the mean and 95% confidence interval: plt.figure(figsize=(12, 6)) sns.lineplot(chocolate_df.review_date, chocolate_df.cocoa_percent) plt.title("Pecentage of Cocoa Used Over Years(2006 - 2020)") # **Summary:** # In 2009, less cocoa amount in chocolate bars was popular, but 71-73% cocoa is generally popular over the years. So, 71-73% cocoa is a safe bet! # *** # ### 4. Rating and Cocoa Percent # Let's see how _cocoa_ amount in chocoloate bars affects its rating. # In this section we explore how the dependence of two variables can be analyzed w.r.t. each other. We can use _joint plot_. A ```jointplot``` augments a bivariate relational or distribution plot with the marginal distributions of the two variables. # In short. we visualize how rating & cocoa amount vary using the ```jointplot``` function from ```seaborn```. # > __Format:__ ```jointplot(x, y[, data, kind, stat_func, ...])``` # > Setting a different ```kind="kde"``` in ```jointplot()``` basically combines two different plots._KDE_ shows the density where the points match up the most . Therefore, It is used to draw a plot of two variables with bivariate and univariate graphs. # > A kernel density estimate (KDE) plot is a method for visualizing the distribution of observations in a dataset, analagous to a histogram. Several other figure-level plotting functions in seaborn make use of the ```histplot()``` and ```kdeplot()``` functions. # > x and y are two strings that are the column names and the data that column contains is used by specifying the data parameter. # here we can see ```cocoa_percent``` on the _y axis_ and ```rating``` on the _x axis_. _Shade of color_ represents the density of values in a region of the graph. plt_s = sns.jointplot(chocolate_df.rating, chocolate_df.cocoa_percent, kind="kde") plt_s.fig.suptitle("Rating and Cocoa Percent") plt_s.ax_joint.collections[0].set_alpha(0) plt_s.fig.tight_layout() plt_s.fig.subplots_adjust(top=0.95) plt_s.fig.set_figwidth(12) plt_s.fig.set_figheight(7) # **Summary** # As noticed in previous section 71-73% cocoa being popular among the years. Here, its proved that this generous amount is a safe bet for decent rating. And it is certainly NOT the case that higher the amount of cocoa, higher the rating, though lesser amount of cocoa than average is also a good risk. # *** # ### 5. Correlation between different columns # To see interdependence between two or more variables, use correlation function ```pandas.DataFrame.corr```. Then we can, check all the correlations simultaneously. 
# > __Format:__ ```DataFrame.corr(method='pearson', min_periods=1)``` # > Computes pairwise correlation of columns, excluding NA/null values. # > Returns: A DataFrame (correlation matrix). # > The _Pearson method_ is used by default, but pandas also allows other methods ('kendall', 'spearman'). # > * 0.9 to 1 positive or negative indicates a very strong correlation. # > * 0.7 to 0.9 positive or negative indicates a strong correlation. # > * 0.5 to 0.7 positive or negative indicates a moderate correlation. # > * 0.3 to 0.5 positive or negative indicates a weak correlation. # > * 0 to 0.3 positive or negative indicates a negligible correlation. # To make the correlations easier to read, we can visualize them with colors. Let's use the ```heatmap``` function in ```seaborn```. # > ```seaborn.heatmap``` # > __Format:__ ```seaborn.heatmap(data, *, vmin=None, vmax=None, cmap=None, center=None, robust=False, annot=None, fmt='.2g', annot_kws=None, linewidths=0, linecolor='white', cbar=True, cbar_kws=None, cbar_ax=None, square=False, xticklabels='auto', yticklabels='auto', mask=None, ax=None, **kwargs)``` # > Plots rectangular data as a color-encoded matrix. # > With annot=True, each cell is annotated with its numeric value. # > The color gradation is observed in relation to the positive and negative correlations. rating_and_composition = chocolate_df[ [ "rating", "cocoa_percent", "counts_of_ingredients", "cocoa_butter", "vanilla", "lecithin", "salt", "sugar", ] ] rating_and_composition plt.figure(figsize=(12, 9)) Chocolate_corr = rating_and_composition.corr() sns.heatmap( Chocolate_corr, xticklabels=Chocolate_corr.columns, yticklabels=Chocolate_corr.columns, annot=True, cmap="YlOrBr", linewidths=0.5, ) plt.title("Correlation Matrix: Rating and Composition") # **Summary** # From the measuring chart in the section introduction, we can see that _lecithin and cocoa butter_ or _lecithin and vanilla_ have a weak correlation, whereas _vanilla and rating_ and _cocoa percent and cocoa butter/lecithin/vanilla_ have a negligible correlation. # ## __Asking and Answering Questions__ # All that analysis and visualization in the previous sections has made us more curious about the dataset. Let's look into some common questions that come to mind and try to answer them. # ### Q1. How does the presence of cocoa butter and lecithin affect rating in the last three years (2018-2020)? # Both of these ingredients serve to lower the viscosity of chocolate and to bind the ingredients. But the question is which of these is popular. Cocoa butter is generally the better option, but it is comparatively expensive. The next option is to use both of them in the right quantities, or to use only lecithin to produce the cheapest variety of chocolate. In this section, let's check how these factors affect chocolate bar rating. # We create ```cocoa_or_lecithin_all``` to view the present data in consideration. Then let's create ```cocoa_or_lecithin```, which only has the last 3 years of data from the dataframe ```cocoa_or_lecithin_all```. We move '```cocoa_butter```' and '```lecithin```' to the ```index``` and then ```unstack``` them. This action assumes we have only one (lecithin, cocoa_butter) combination per rating. # ```Stacking``` a DataFrame means moving (also rotating or pivoting) the innermost column index to become the innermost row index and, as you guessed, the inverse operation is called _unstacking_, which means moving the innermost row index to become the innermost column index again.
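# A tiny toy illustration (not part of the original analysis) of what stack/unstack do:
# unstack moves the innermost row index level up into the columns, and stack moves it back.
import pandas as pd

toy = pd.DataFrame(
    {
        "year": [2019, 2019, 2020, 2020],
        "cocoa_butter": [True, False, True, False],
        "rating": [3.5, 3.0, 3.75, 3.25],
    }
).set_index(["year", "cocoa_butter"])
wide = toy["rating"].unstack("cocoa_butter")  # 'cocoa_butter' becomes the column index
long_again = wide.stack()  # stack() moves it back into the row index
print(wide)
print(long_again)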
# Here we will be using visualization function: # ```pandas.DataFrame.plot``` # > __Format:__ ```DataFrame.plot(x=None, y=None, kind='line', ax=None, subplots=False, sharex=None, sharey=False, layout=None, figsize=None, use_index=True, title=None, grid=None, legend=True, style=None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, secondary_y=False, sort_columns=False, **kwds)``` # > Make plots of a Series or a DataFrame. # cocoa_or_lecithin_all = chocolate_df[ ["review_date", "rating", "cocoa_butter", "lecithin"] ].copy() cocoa_or_lecithin = cocoa_or_lecithin_all[ (cocoa_or_lecithin_all.review_date >= 2018) & (cocoa_or_lecithin_all.review_date <= 2020) ].reset_index(drop=True) # cocoa_or_lecithin = cocoa_or_lecithin.set_index('review_date') cocoa_or_lecithin cocoa_or_lecithin.set_index(["cocoa_butter", "lecithin"], append=True, inplace=True) cocoa_or_lecithin_df = ( cocoa_or_lecithin.unstack(["cocoa_butter", "lecithin"]) .xs("rating", axis=1) .plot(figsize=(12, 7), colormap="plasma") ) cocoa_or_lecithin_df.legend( ["Only Cocoa butter", "Both", "None", "Only Lecithin"], prop={"size": 14} ) plt.title("Rating vs Cocoa butter, Lecithin in Chocolates (2018-2020)", fontsize=18) plt.ylabel("Rating", fontsize=14) plt.yticks(fontsize=12) cocoa_or_lecithin_df.set_facecolor("grey") # cocoa_butter, lecithin # True, False (Only Cocoa butter) # True, True (Both) # False, False (None) # False, True (Only Lecithin) # **Summary:** # As we can see here using both cocoa butter and lecithin does equally good. But only adding Cocoa butter does have better chances in gaining good score. # *** # ### Q2. How much cocoa is actually preferred by top companies? # We use ```pandas.DataFrame.max``` here: # > __Format__ ```DataFrame.max(axis=None, skipna=None, level=None, numeric_only=None, **kwargs)``` # > Returns the maximum of the values for the requested axis. top_rated_df = chocolate_df[chocolate_df.rating == chocolate_df.rating.max()] top_rated_df # Let's check percentage of cocoa each of these top companies have. # To plot a histogram, we use ```matplotlib.pyplot.hist``` here. # > __Format:__ ```matplotlib.pyplot.hist(x, bins=None, range=None, density=False, weights=None, cumulative=False, bottom=None, histtype='bar', align='mid', orientation='vertical', rwidth=None, log=False, color=None, label=None, stacked=False, *, data=None, **kwargs)[source]``` plt.figure(figsize=(12, 6)) plt.title("Percentage of Cocoa in Chocolates") plt.xlabel("Percentage of cocoa") plt.ylabel("Number of companies") plt.hist(top_rated_df.cocoa_percent, bins=[30, 50, 60, 70, 85, 99], color="maroon") # **Summary:** # Chocolate is broadly classified by the amount of cocoa it contains. And generally, over 70% cocoa is dark chocolate. Therefore, this implies, many top rated companies have preferred manufacturing dark chocolates over other versions of chocolates. # *** # ### Q3. From which countries, top companies import cocoa beans? # Note: If _x-axis_ labels that are too long for comfortable display, there’s two options in this case – # rotating the labels to make a bit more space, or rotating the entire chart to end up with a horizontal bar chart. # The ```xticks``` function from ```Matplotlib``` is used here, with the rotation. # > The Matplotlib “```xtick```” function is used to rotate the labels on axes, allowing for longer labels when needed. 
top_bean_countries = top_rated_df.country_of_bean_origin.value_counts()
top_bean_countries
plt.figure(figsize=(12, 6))
plt.xticks(rotation=75)
plt.title("Countries of Bean Origin of Chocolates (In Top Rated)")
sns.barplot(x=top_bean_countries.index, y=top_bean_countries.values, palette="viridis")
plt.xlabel("Bean Origin (Country)")
plt.ylabel("Number of chocolate bars")
# **Summary** # It can be seen that most of these countries are _developing countries_.
# *** # ### Q4. What must have been the recipe of the top rated chocolate in the last year, 2019? Also, what tastes are in top rated chocolates during 2016-2020?
top_rated_recent = top_rated_df[
    (top_rated_df.review_date >= 2016) & (top_rated_df.review_date <= 2020)
]
top_rated_recent
# *** # #### 2019 recipe: # We have been displaying dataframes, but it gets boring to look at the same design every time. For this special display of the _**recipe**_, let's add a background color. We will use ```df.style.set_properties```. # > With this, we can use built-in functionality to manipulate data frame styling, from font color to background color. # > The ```DataFrame.style``` property returns a styler object with a number of useful methods for formatting and visualizing data frames.
recipe_df = top_rated_recent[(top_rated_recent.review_date == 2019)]
recipe_df = recipe_df[
    [
        "company",
        "company_location",
        "cocoa_percent",
        "cocoa_butter",
        "vanilla",
        "lecithin",
        "salt",
        "sugar",
        "first_taste",
        "second_taste",
        "third_taste",
    ]
]
recipe_df = recipe_df.set_index("company")
recipe_df.style.set_properties(**{"background-color": "brown", "color": "yellow"})
# *** # #### Tastes in Top Rated Chocolates (2016-2020): # Let's explore Pandas ```DataFrame.plot.bar()``` to see the _first, second and third tastes_ preferred by various companies. # There is also the older ```.plot(kind="bar")``` syntax; however, direct functions such as .bar() now exist on the ```DataFrame.plot``` object and act as wrappers around the plotting functions. # The next step in our bar charting journey is to compare. Typically this leads to an “```unstacked```” bar plot, but here we go for the ```stacked``` version of the bar plot. The bars at each index point from the unstacked bar chart are literally “```stacked```” on top of one another. # Pandas makes this easy with the “```stacked```” argument for the plot command. # Hence, _the Stacked Bar Chart_ places the values at each sample or index point in the DataFrame on top of one another. # > Stacked bar charts are best for examining patterns in the composition of the totals at each sample point. #
plt.style.use("ggplot")
df = top_rated_recent[["company", "first_taste", "second_taste", "third_taste"]]
cols_to_plot = ["first_taste", "second_taste", "third_taste"]
fig, ax = plt.subplots(nrows=len(cols_to_plot), ncols=1, figsize=(15, 26))
for i, col in enumerate(cols_to_plot):
    plt.sca(ax[i])  # make ax[i] the current axes for the plt.* calls below
    df.groupby([col, "company"]).company.count().unstack().plot.bar(
        ax=ax[i], legend=True, stacked=True
    )
    plt.xlabel(col, fontsize=14)
    plt.ylabel("Number of chocolate bars", fontsize=14)
    plt.xticks(fontsize=12, rotation=75)
    plt.yticks(fontsize=12)
    plt.legend(fontsize=10)
fig.tight_layout(pad=5.0)
fig.suptitle("Tastes in Top Rated Chocolates (2016-2020)", fontsize=18)
# or plt.suptitle("Tastes in Top Rated Chocolates (2016-2020)");
# **Summary** # Well, the top recipes do indicate dark chocolate. One of the recipes uses only cocoa butter and sugar (no lecithin) and has creamy, fruity & nutty tastes. Another uses cocoa butter, is sugar-free, and has fig as the only taste. # Among the top tastes, it seems that creamy is pretty popular as the first taste, followed by honey as the second taste and nutty & cocoa-like in the third taste.
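# As an aside on the stacked bars above: ```pd.crosstab``` builds the same company-by-taste counts in a single step. This is a minimal sketch (new variable names, not the approach used in this notebook):
taste_counts = pd.crosstab(top_rated_recent["first_taste"], top_rated_recent["company"])
taste_counts.plot.bar(stacked=True, figsize=(15, 8))
plt.xlabel("first_taste")
plt.ylabel("Number of chocolate bars")
plt.show()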
# *** # ### Q5. What are the major chocolate regions whose companies generally make it into the Top 50? # In short: the countries with the most companies in the Top 50. # We take the average rating of each company here. # **NOTE:** If you need to work with a dataframe after aggregation, use ```as_index=False```
chocolate_df
average_rating_df = chocolate_df.groupby(
    ["company", "company_location"], as_index=False
)[["rating"]].mean()
average_rating_df
top_fifty_df = average_rating_df.sort_values("rating", ascending=False).head(50)
top_fifty_df
top_countries = top_fifty_df.company_location.value_counts()
top_countries
plt.figure(figsize=(12, 6))
plt.xticks(rotation=75)
plt.title("Countries in Top 50")
sns.barplot(x=top_countries.index, y=top_countries.values, palette="hls")
plt.xlabel("Country")
plt.ylabel("Number of companies")
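# Returning to the ```as_index=False``` note above, a minimal illustration of the difference (purely for inspection, with new variable names; not part of the analysis):
grouped_default = chocolate_df.groupby("company_location")["rating"].mean()  # a Series indexed by company_location
grouped_flat = chocolate_df.groupby("company_location", as_index=False)["rating"].mean()  # a DataFrame with company_location as a regular column
print(type(grouped_default), type(grouped_flat))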
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/248/69248918.ipynb
chocolate-bar-2020
soroushghaderi
[{"Id": 69248918, "ScriptId": 18904263, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5834392, "CreationDate": "07/28/2021 15:47:56", "VersionNumber": 1.0, "Title": "CHOCOLATE BAR RECIPE TREND ANALYSIS (2006-2020)", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 623.0, "LinesInsertedFromPrevious": 623.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92184933, "KernelVersionId": 69248918, "SourceDatasetVersionId": 1093011}]
[{"Id": 1093011, "DatasetId": 610579, "DatasourceVersionId": 1122993, "CreatorUserId": 4122600, "LicenseName": "ODC Attribution License (ODC-By)", "CreationDate": "04/19/2020 05:09:47", "VersionNumber": 1.0, "Title": "Chocolate Bar 2020", "Slug": "chocolate-bar-2020", "Subtitle": "2300 chocolate bar ratings", "Description": "### Context\n\nChocolate is one of the most popular candies in the world. Each year, residents of the United States collectively eat more than 2.8 billion pounds. However, not all chocolate bars are created equal! This dataset contains expert ratings of over 1,700 individual chocolate bars, along with information on their regional origin, percentage of cocoa, the variety of chocolate bean used, and where the beans were grown.\n\n## Flavors of Cacao Rating System:\n\nRating Scale\n \n4.0 - 5.0 = Outstanding\n3.5 - 3.9 = Highly Recommended\n3.0 - 3.49 = Recommended\n2.0 - 2.9 = Disappointing\n1.0 - 1.9 = Unpleasant\n\n*Not all the bars in each range are considered equal, so to show variance from bars in the same range I have assigned .25, .50 or .75.\n\nEach chocolate is evaluated from a combination of both objective qualities and subjective interpretation. A rating here only represents an experience with one bar from one batch. Batch numbers, vintages, and review dates are included in the database when known. I would recommend people to try all the chocolate on the database regardless of the rating and experience for themselves.\n\nThe database is narrowly focused on plain dark chocolate to appreciate the flavors of the cacao when made into chocolate. The ratings do not reflect health benefits, social missions, or organic status.\n\nThe flavor is the most important component of the Flavors of Cacao ratings. Diversity, balance, intensity, and purity of flavors are all considered. A straight forward single note chocolate can rate as high as a complex flavor profile that changes throughout. Genetics, terroir, post-harvest techniques, processing, and storage can all be discussed when considering the flavor component.\n\nTexture has a great impact on the overall experience and it is also possible for texture related issues to impact flavor. It is a good way to evaluate the makers' vision, attention to detail, and level of proficiency.\n\nAftermelt is the experience after the chocolate has melted. Higher quality chocolate will linger and be long-lasting and enjoyable. Since the after melt is the last impression you get from the chocolate, it receives equal importance in the overall rating.\n\nOverall Opinion is really where the ratings reflect a subjective opinion. Ideally, it is my evaluation of whether or not the components above worked together and opinion on the flavor development, character, and style. It is also here where each chocolate can usually be summarized by the most prominent impressions that you would remember about each chocolate\n\n\n### Acknowledgements\n\nThese ratings were compiled by Brady Brelinski, Founding Member of the Manhattan Chocolate Society. For up-to-date information, as well as additional content (including interviews with craft chocolate makers), please see his website: [Flavors of Cacao](http://flavorsofcacao.com/index.html)\n\n\n### Inspiration\n\nWe have multiple questions to answer, in the below list we answer most important pieces of information that possible to answer.\\\n1. Where are the best cocoa beans grown?\n2. Which countries produce the highest-rated bars?\n3. Who creates the best Chocolate bars?\n4. What is Favorite taste?\n5. 
Which company has highest Rate?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 610579, "CreatorUserId": 4122600, "OwnerUserId": 4122600.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1093011.0, "CurrentDatasourceVersionId": 1122993.0, "ForumId": 624637, "Type": 2, "CreationDate": "04/19/2020 05:09:47", "LastActivityDate": "04/19/2020", "TotalViews": 28247, "TotalDownloads": 3987, "TotalVotes": 52, "TotalKernels": 10}]
[{"Id": 4122600, "UserName": "soroushghaderi", "DisplayName": "Soroush Ghaderi", "RegisterDate": "11/28/2019", "PerformanceTier": 1}]
[{"chocolate-bar-2020/chocolate.csv": {"column_names": "[\"Unnamed: 0\", \"ref\", \"company\", \"company_location\", \"review_date\", \"country_of_bean_origin\", \"specific_bean_origin_or_bar_name\", \"cocoa_percent\", \"rating\", \"counts_of_ingredients\", \"beans\", \"cocoa_butter\", \"vanilla\", \"lecithin\", \"salt\", \"sugar\", \"sweetener_without_sugar\", \"first_taste\", \"second_taste\", \"third_taste\", \"fourth_taste\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"ref\": \"int64\", \"company\": \"object\", \"company_location\": \"object\", \"review_date\": \"int64\", \"country_of_bean_origin\": \"object\", \"specific_bean_origin_or_bar_name\": \"object\", \"cocoa_percent\": \"float64\", \"rating\": \"float64\", \"counts_of_ingredients\": \"int64\", \"beans\": \"object\", \"cocoa_butter\": \"object\", \"vanilla\": \"object\", \"lecithin\": \"object\", \"salt\": \"object\", \"sugar\": \"object\", \"sweetener_without_sugar\": \"object\", \"first_taste\": \"object\", \"second_taste\": \"object\", \"third_taste\": \"object\", \"fourth_taste\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2224 entries, 0 to 2223\nData columns (total 21 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 2224 non-null int64 \n 1 ref 2224 non-null int64 \n 2 company 2224 non-null object \n 3 company_location 2224 non-null object \n 4 review_date 2224 non-null int64 \n 5 country_of_bean_origin 2224 non-null object \n 6 specific_bean_origin_or_bar_name 2224 non-null object \n 7 cocoa_percent 2224 non-null float64\n 8 rating 2224 non-null float64\n 9 counts_of_ingredients 2224 non-null int64 \n 10 beans 2224 non-null object \n 11 cocoa_butter 2224 non-null object \n 12 vanilla 2224 non-null object \n 13 lecithin 2224 non-null object \n 14 salt 2224 non-null object \n 15 sugar 2224 non-null object \n 16 sweetener_without_sugar 2224 non-null object \n 17 first_taste 2224 non-null object \n 18 second_taste 2147 non-null object \n 19 third_taste 1604 non-null object \n 20 fourth_taste 242 non-null object \ndtypes: float64(2), int64(4), object(15)\nmemory usage: 365.0+ KB\n", "summary": "{\"Unnamed: 0\": {\"count\": 2224.0, \"mean\": 1111.5, \"std\": 642.1578206848117, \"min\": 0.0, \"25%\": 555.75, \"50%\": 1111.5, \"75%\": 1667.25, \"max\": 2223.0}, \"ref\": {\"count\": 2224.0, \"mean\": 1337.0103417266187, \"std\": 693.9597679888277, \"min\": 5.0, \"25%\": 776.0, \"50%\": 1381.0, \"75%\": 1928.0, \"max\": 2490.0}, \"review_date\": {\"count\": 2224.0, \"mean\": 2013.8579136690648, \"std\": 3.582150777434377, \"min\": 2006.0, \"25%\": 2011.0, \"50%\": 2014.0, \"75%\": 2016.0, \"max\": 2020.0}, \"cocoa_percent\": {\"count\": 2224.0, \"mean\": 71.4939298561151, \"std\": 5.2782525177847806, \"min\": 42.0, \"25%\": 70.0, \"50%\": 70.0, \"75%\": 74.0, \"max\": 100.0}, \"rating\": {\"count\": 2224.0, \"mean\": 3.198561151079137, \"std\": 0.43432896919136804, \"min\": 1.0, \"25%\": 3.0, \"50%\": 3.25, \"75%\": 3.5, \"max\": 4.0}, \"counts_of_ingredients\": {\"count\": 2224.0, \"mean\": 3.0759892086330933, \"std\": 0.9298754684085884, \"min\": 1.0, \"25%\": 2.0, \"50%\": 3.0, \"75%\": 4.0, \"max\": 6.0}}", "examples": "{\"Unnamed: 0\":{\"0\":0,\"1\":1,\"2\":2,\"3\":3},\"ref\":{\"0\":2454,\"1\":2458,\"2\":2454,\"3\":797},\"company\":{\"0\":\"5150\",\"1\":\"5150\",\"2\":\"5150\",\"3\":\"A. 
Morin\"},\"company_location\":{\"0\":\"U.S.A\",\"1\":\"U.S.A\",\"2\":\"U.S.A\",\"3\":\"France\"},\"review_date\":{\"0\":2019,\"1\":2019,\"2\":2019,\"3\":2012},\"country_of_bean_origin\":{\"0\":\"Madagascar\",\"1\":\"Dominican republic\",\"2\":\"Tanzania\",\"3\":\"Peru\"},\"specific_bean_origin_or_bar_name\":{\"0\":\"Bejofo Estate, batch 1\",\"1\":\"Zorzal, batch 1\",\"2\":\"Kokoa Kamili, batch 1\",\"3\":\"Peru\"},\"cocoa_percent\":{\"0\":76.0,\"1\":76.0,\"2\":76.0,\"3\":63.0},\"rating\":{\"0\":3.75,\"1\":3.5,\"2\":3.25,\"3\":3.75},\"counts_of_ingredients\":{\"0\":3,\"1\":3,\"2\":3,\"3\":4},\"beans\":{\"0\":\"have_bean\",\"1\":\"have_bean\",\"2\":\"have_bean\",\"3\":\"have_bean\"},\"cocoa_butter\":{\"0\":\"have_cocoa_butter\",\"1\":\"have_cocoa_butter\",\"2\":\"have_cocoa_butter\",\"3\":\"have_cocoa_butter\"},\"vanilla\":{\"0\":\"have_not_vanila\",\"1\":\"have_not_vanila\",\"2\":\"have_not_vanila\",\"3\":\"have_not_vanila\"},\"lecithin\":{\"0\":\"have_not_lecithin\",\"1\":\"have_not_lecithin\",\"2\":\"have_not_lecithin\",\"3\":\"have_lecithin\"},\"salt\":{\"0\":\"have_not_salt\",\"1\":\"have_not_salt\",\"2\":\"have_not_salt\",\"3\":\"have_not_salt\"},\"sugar\":{\"0\":\"have_sugar\",\"1\":\"have_sugar\",\"2\":\"have_sugar\",\"3\":\"have_sugar\"},\"sweetener_without_sugar\":{\"0\":\"have_not_sweetener_without_sugar\",\"1\":\"have_not_sweetener_without_sugar\",\"2\":\"have_not_sweetener_without_sugar\",\"3\":\"have_not_sweetener_without_sugar\"},\"first_taste\":{\"0\":\"cocoa\",\"1\":\"cocoa\",\"2\":\"rich cocoa\",\"3\":\"fruity\"},\"second_taste\":{\"0\":\"blackberry\",\"1\":\"vegetal\",\"2\":\"fatty\",\"3\":\"melon\"},\"third_taste\":{\"0\":\"full body\",\"1\":\"savory\",\"2\":\"bready\",\"3\":\"roasty\"},\"fourth_taste\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null}}"}}]
true
1
<start_data_description><data_path>chocolate-bar-2020/chocolate.csv: <column_names> ['Unnamed: 0', 'ref', 'company', 'company_location', 'review_date', 'country_of_bean_origin', 'specific_bean_origin_or_bar_name', 'cocoa_percent', 'rating', 'counts_of_ingredients', 'beans', 'cocoa_butter', 'vanilla', 'lecithin', 'salt', 'sugar', 'sweetener_without_sugar', 'first_taste', 'second_taste', 'third_taste', 'fourth_taste'] <column_types> {'Unnamed: 0': 'int64', 'ref': 'int64', 'company': 'object', 'company_location': 'object', 'review_date': 'int64', 'country_of_bean_origin': 'object', 'specific_bean_origin_or_bar_name': 'object', 'cocoa_percent': 'float64', 'rating': 'float64', 'counts_of_ingredients': 'int64', 'beans': 'object', 'cocoa_butter': 'object', 'vanilla': 'object', 'lecithin': 'object', 'salt': 'object', 'sugar': 'object', 'sweetener_without_sugar': 'object', 'first_taste': 'object', 'second_taste': 'object', 'third_taste': 'object', 'fourth_taste': 'object'} <dataframe_Summary> {'Unnamed: 0': {'count': 2224.0, 'mean': 1111.5, 'std': 642.1578206848117, 'min': 0.0, '25%': 555.75, '50%': 1111.5, '75%': 1667.25, 'max': 2223.0}, 'ref': {'count': 2224.0, 'mean': 1337.0103417266187, 'std': 693.9597679888277, 'min': 5.0, '25%': 776.0, '50%': 1381.0, '75%': 1928.0, 'max': 2490.0}, 'review_date': {'count': 2224.0, 'mean': 2013.8579136690648, 'std': 3.582150777434377, 'min': 2006.0, '25%': 2011.0, '50%': 2014.0, '75%': 2016.0, 'max': 2020.0}, 'cocoa_percent': {'count': 2224.0, 'mean': 71.4939298561151, 'std': 5.2782525177847806, 'min': 42.0, '25%': 70.0, '50%': 70.0, '75%': 74.0, 'max': 100.0}, 'rating': {'count': 2224.0, 'mean': 3.198561151079137, 'std': 0.43432896919136804, 'min': 1.0, '25%': 3.0, '50%': 3.25, '75%': 3.5, 'max': 4.0}, 'counts_of_ingredients': {'count': 2224.0, 'mean': 3.0759892086330933, 'std': 0.9298754684085884, 'min': 1.0, '25%': 2.0, '50%': 3.0, '75%': 4.0, 'max': 6.0}} <dataframe_info> RangeIndex: 2224 entries, 0 to 2223 Data columns (total 21 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 2224 non-null int64 1 ref 2224 non-null int64 2 company 2224 non-null object 3 company_location 2224 non-null object 4 review_date 2224 non-null int64 5 country_of_bean_origin 2224 non-null object 6 specific_bean_origin_or_bar_name 2224 non-null object 7 cocoa_percent 2224 non-null float64 8 rating 2224 non-null float64 9 counts_of_ingredients 2224 non-null int64 10 beans 2224 non-null object 11 cocoa_butter 2224 non-null object 12 vanilla 2224 non-null object 13 lecithin 2224 non-null object 14 salt 2224 non-null object 15 sugar 2224 non-null object 16 sweetener_without_sugar 2224 non-null object 17 first_taste 2224 non-null object 18 second_taste 2147 non-null object 19 third_taste 1604 non-null object 20 fourth_taste 242 non-null object dtypes: float64(2), int64(4), object(15) memory usage: 365.0+ KB <some_examples> {'Unnamed: 0': {'0': 0, '1': 1, '2': 2, '3': 3}, 'ref': {'0': 2454, '1': 2458, '2': 2454, '3': 797}, 'company': {'0': '5150', '1': '5150', '2': '5150', '3': 'A. 
Morin'}, 'company_location': {'0': 'U.S.A', '1': 'U.S.A', '2': 'U.S.A', '3': 'France'}, 'review_date': {'0': 2019, '1': 2019, '2': 2019, '3': 2012}, 'country_of_bean_origin': {'0': 'Madagascar', '1': 'Dominican republic', '2': 'Tanzania', '3': 'Peru'}, 'specific_bean_origin_or_bar_name': {'0': 'Bejofo Estate, batch 1', '1': 'Zorzal, batch 1', '2': 'Kokoa Kamili, batch 1', '3': 'Peru'}, 'cocoa_percent': {'0': 76.0, '1': 76.0, '2': 76.0, '3': 63.0}, 'rating': {'0': 3.75, '1': 3.5, '2': 3.25, '3': 3.75}, 'counts_of_ingredients': {'0': 3, '1': 3, '2': 3, '3': 4}, 'beans': {'0': 'have_bean', '1': 'have_bean', '2': 'have_bean', '3': 'have_bean'}, 'cocoa_butter': {'0': 'have_cocoa_butter', '1': 'have_cocoa_butter', '2': 'have_cocoa_butter', '3': 'have_cocoa_butter'}, 'vanilla': {'0': 'have_not_vanila', '1': 'have_not_vanila', '2': 'have_not_vanila', '3': 'have_not_vanila'}, 'lecithin': {'0': 'have_not_lecithin', '1': 'have_not_lecithin', '2': 'have_not_lecithin', '3': 'have_lecithin'}, 'salt': {'0': 'have_not_salt', '1': 'have_not_salt', '2': 'have_not_salt', '3': 'have_not_salt'}, 'sugar': {'0': 'have_sugar', '1': 'have_sugar', '2': 'have_sugar', '3': 'have_sugar'}, 'sweetener_without_sugar': {'0': 'have_not_sweetener_without_sugar', '1': 'have_not_sweetener_without_sugar', '2': 'have_not_sweetener_without_sugar', '3': 'have_not_sweetener_without_sugar'}, 'first_taste': {'0': 'cocoa', '1': 'cocoa', '2': 'rich cocoa', '3': 'fruity'}, 'second_taste': {'0': 'blackberry', '1': 'vegetal', '2': 'fatty', '3': 'melon'}, 'third_taste': {'0': 'full body', '1': 'savory', '2': 'bready', '3': 'roasty'}, 'fourth_taste': {'0': None, '1': None, '2': None, '3': None}} <end_description>
8,502
0
10,737
8,502
69248036
# ## MLB data and API for forecasting # Starting to get the data # From the documentation and examples # https://www.kaggle.com/chumajin/eda-of-mlb-for-starter-english-ver # https://www.kaggle.com/ryanholbrook/getting-started-with-mlb-player-digital-engagement # - I built 2 pickle files from the train.csv file
import numpy as np
import pandas as pd
import os
from joblib import Parallel, delayed
from pathlib import Path
from datetime import datetime, timedelta
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error
from IPython.display import Image
Image("../input/pictures/johnny_automatic_old_time_pitcher.jpg")
import pickle5 as pickle
with open("../input/pickle-data/df_nextDayPlayer.pkl", "rb") as handle:
    df_nextDayPlayer = pickle.load(handle)
with open("../input/pickle-data/playerBoxScores.pkl", "rb") as handle:
    df_playerBoxScores = pickle.load(handle)
df_playerBoxScores.head(n=2)
df_nextDayPlayer.head(n=2)
print(df_playerBoxScores.shape)
print(df_nextDayPlayer.shape)
# -------------------------- prepare data sets -------------------------------
df_nextDayPlayer.nunique()
# # Data Player Box Scores
df_playerBoxScores.info()
targets = ["target1", "target2", "target3", "target4"]
features = [
    "hits",
    "doubles",
    "triples",
    "strikeOuts",
    "homeRuns",
    "atBats",
    "runsScored",
    "stolenBases",
    "caughtStealing",
    "strikeOutsPitching",
    "inningsPitched",
    "strikes",
    "flyOuts",
    "groundOuts",
    "errors",
    "chances",
    "TotalBase",
    "BattingAvg",
]
# ---------------------- adding calculated columns ----------------------------
# Adding a total-bases column
# Note: "hits" already includes doubles, triples and home runs, so this sum
# overcounts relative to the standard total-bases formula (1B + 2*2B + 3*3B + 4*HR)
df_playerBoxScores["TotalBase"] = (
    df_playerBoxScores["hits"]
    + (2 * df_playerBoxScores["doubles"])
    + (3 * df_playerBoxScores["triples"])
    + (4 * df_playerBoxScores["homeRuns"])
)
# Adding batting average (hits / at-bats)
df_playerBoxScores["BattingAvg"] = (
    df_playerBoxScores["hits"] / df_playerBoxScores["atBats"]
)
df_playerBoxScores.shape
# ---------- prepare player box scores data ----------------------------------
df_playerBoxScores.rename(columns={"gameDate": "date"}, inplace=True)
df_playerBoxScores = df_playerBoxScores[["date", "playerId"] + features]
# Set dtypes
df_playerBoxScores = df_playerBoxScores.astype({name: np.float32 for name in features})
df_playerBoxScores = df_playerBoxScores.astype({"playerId": str})
df_playerBoxScores.groupby(["date", "playerId"], as_index=False).sum()
# # Exploring the data for a few player Ids
# Data seasonality ----------------------------------------------------------------
season_Global_2020 = df_playerBoxScores[
    df_playerBoxScores.date.between("2018-01-02", "2020-12-31")
]
season_Global_2020 = season_Global_2020.sort_values(by="BattingAvg", ascending=False)
season_Global_2020 = season_Global_2020.sort_values(by="date")
season_Global_2020.groupby(["playerId", "date"], axis=0).sum()
season_Global_2020_02 = season_Global_2020[season_Global_2020["playerId"] == "543105"]
# plotting some performances
fig, ax = plt.subplots(figsize=(24, 7))
ax.plot(
    season_Global_2020_02.date,
    season_Global_2020_02.BattingAvg,
    label="BattingAvg",
    fillstyle="full",
    linewidth=2,
)
ax.plot(
    season_Global_2020_02.date,
    season_Global_2020_02.homeRuns,
    label="homeRuns",
    fillstyle="full",
    linewidth=2,
)
plt.xlabel("date", labelpad=10)
plt.title("playerId 543105")
# plt.xticks(rotation=45)
plt.xticks([])
plt.legend(frameon=False)
plt.show()
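# A note on the BattingAvg column computed above: when atBats is 0 the division is undefined and pandas yields inf or NaN, which shows up as gaps in the BattingAvg line. A minimal sketch of a safer variant (new variable name, not applied to the data used below; treating no-at-bat games as 0.0 is an assumption):
safe_batting_avg = (
    df_playerBoxScores["hits"]
    .div(df_playerBoxScores["atBats"].replace(0, np.nan))  # avoid dividing by zero
    .fillna(0.0)  # assumption: treat games with no at-bats as a 0.0 average
)
safe_batting_avg.describe()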
# plotting some performances
fig, ax = plt.subplots(figsize=(24, 7))
ax.plot(
    season_Global_2020_02.date,
    season_Global_2020_02.runsScored,
    label="runsScored",
    fillstyle="full",
    linewidth=2,
)
plt.xlabel("date", labelpad=10)
plt.title("playerId 543105")
# plt.xticks(rotation=45)
plt.xticks([])
plt.legend(frameon=False)
plt.show()
# Next step is to prepare the next-day player data and build a global data frame #
Image("../input/pictures/johnny_automatic_baseball_at_bat.jpg")
# # Data Next Day Player
# --------------- Prepare next day player data ------------------------------------------------------------------------------------
df_nextDayPlayer.rename(columns={"engagementMetricsDate": "date"}, inplace=True)
df_nextDayPlayer = df_nextDayPlayer.astype({name: np.float32 for name in targets})
df_nextDayPlayer = df_nextDayPlayer.astype({"playerId": str})
df_nextDayPlayer = df_nextDayPlayer.groupby(["date", "playerId"], as_index=False).sum()
df_nextDayPlayer.head()
df_nextDayPlayer.iloc[:, 2]
# ---------------------------------------------- Data seasonality ----------------------------------------------------------------
season_nextdayPlayer_2020 = df_nextDayPlayer[
    df_nextDayPlayer.date.between("2019-01-01", "2021-12-31")
]
season_nextdayPlayer_2020 = season_nextdayPlayer_2020.sort_values(
    by="target1", ascending=False
)
season_nextdayPlayer_2020 = season_nextdayPlayer_2020.sort_values(by="date")
season_nextdayPlayer_02 = season_nextdayPlayer_2020[
    (season_nextdayPlayer_2020["playerId"] == "543105")
]
season_nextdayPlayer_03 = season_nextdayPlayer_2020[
    (season_nextdayPlayer_2020["playerId"] == "282332")
]
season_nextdayPlayer_04 = season_nextdayPlayer_2020[
    (season_nextdayPlayer_2020["playerId"] == "516969")
]
# # Understand the correlation between targets for different player Ids
plt.figure(figsize=(17, 11))
# Heatmap 1: player Id 543105
corr = season_nextdayPlayer_02.corr()
cmap = sns.diverging_palette(230, 20, as_cmap=True)
plt.subplot(1, 3, 1)
sns.heatmap(
    corr, center=0, annot=True, square=True, linewidths=0.5, cbar_kws={"shrink": 0.5}
)
plt.title("Heatmap player Id : 543105")
# Heatmap 2: player Id 282332
corr02 = season_nextdayPlayer_03.corr()
plt.subplot(1, 3, 2)
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(
    corr02, center=0, annot=True, square=True, linewidths=0.5, cbar_kws={"shrink": 0.5}
)
plt.title("Heatmap player Id : 282332")
# Heatmap 3: player Id 516969
corr03 = season_nextdayPlayer_04.corr()
plt.subplot(1, 3, 3)
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(
    corr03, center=0, annot=True, square=True, linewidths=0.5, cbar_kws={"shrink": 0.5}
)
plt.title("Heatmap player Id : 516969")
plt.show()
fig, ax = plt.subplots(figsize=(24, 7))
ax.plot(
    season_nextdayPlayer_02.date,
    season_nextdayPlayer_02.target1,
    label="target1",
    fillstyle="full",
    linewidth=2,
)
ax.plot(
    season_nextdayPlayer_02.date,
    season_nextdayPlayer_02.target2,
    label="target2",
    fillstyle="full",
    linewidth=2,
)
ax.plot(
    season_nextdayPlayer_02.date,
    season_nextdayPlayer_02.target3,
    label="target3",
    fillstyle="full",
    linewidth=2,
)
ax.plot(
    season_nextdayPlayer_02.date,
    season_nextdayPlayer_02.target4,
    label="target4",
    fillstyle="full",
    linewidth=2,
)
plt.xlabel("date", labelpad=10)
plt.title("playerId 543105 and targets")
plt.xticks([])
plt.legend(frameon=False)
plt.show()
season_nextdayPlayer_02["target1"][-30:].plot(
    figsize=(12, 4), label="target1", fillstyle="full", linewidth=2
)
season_nextdayPlayer_02["target2"][-30:].plot(
    figsize=(12, 4), label="target2", fillstyle="full", linewidth=2
)
season_nextdayPlayer_02["target3"][-30:].plot(
    figsize=(12, 4), label="target3", fillstyle="full", linewidth=2
)
season_nextdayPlayer_02["target4"][-30:].plot(
    figsize=(12, 4), label="target4", fillstyle="full", linewidth=2
)
plt.title("playerId 543105 and targets")
plt.legend(frameon=False)
plt.show()
# Making some graphs to understand how the seasonality can differ by player and by season
from statsmodels.tsa.seasonal import STL
# helper for plotting the trend, seasonal and residual components
def add_stl_plot(fig, res, legend):
    axs = fig.get_axes()
    comps = ["trend", "seasonal", "resid"]
    for ax, comp in zip(axs[1:], comps):
        series = getattr(res, comp)
        if comp == "resid":
            ax.plot(series, marker="o", linestyle="none")
        else:
            ax.plot(series)
        if comp == "trend":
            ax.legend(legend, frameon=False)
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
sns.set_style("darkgrid")
plt.rc("figure", figsize=(16, 12))
plt.rc("font", size=13)
plt.figure(figsize=(17, 11))
stl = STL(season_nextdayPlayer_02.target1, period=7, robust=True)
res_robust = stl.fit()
fig = res_robust.plot()
res_non_robust = STL(season_nextdayPlayer_02.target1, period=7, robust=False).fit()
add_stl_plot(fig, res_non_robust, ["Robust", "Non-robust"])
plt.title("playerId 543105 seasonal trend and resid")
plt.show()
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(211)
fig = plot_acf(season_nextdayPlayer_02["target2"][-395:], lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = plot_pacf(season_nextdayPlayer_02["target2"][-395:], lags=40, ax=ax2)
# Using the regular season start and end dates
season_OnedayPlayer_2020 = df_nextDayPlayer[
    df_nextDayPlayer.date.between("2021-02-28", "2021-10-31")
]
# example for one player
season_OnedayPlayer_2020 = season_OnedayPlayer_2020[
    (season_OnedayPlayer_2020["playerId"] == "282332")
]
season_OnedayPlayer_2020.isnull().sum()
series = np.log(season_OnedayPlayer_2020["target2"])
series01 = np.log(season_OnedayPlayer_2020["target1"])
series03 = np.log(season_OnedayPlayer_2020["target3"])
series04 = np.log(season_OnedayPlayer_2020["target4"])
plt.figure(figsize=(12, 8))
plt.plot(series, label="Series target 2")
plt.plot(series01, label="Series target 1")
plt.plot(series03, label="Series target 3")
plt.plot(series04, label="Series target 4")
plt.legend()
plt.show()
# ## SARIMA for one player for the 2021 regular season
from statsmodels.tsa.statespace.sarimax import SARIMAX
size = int(len(season_OnedayPlayer_2020["target2"].dropna()) * 0.75)
train, test = (
    season_OnedayPlayer_2020["target2"].dropna()[0:size],
    season_OnedayPlayer_2020["target2"].dropna()[size:],
)
test = test.reset_index()["target2"]
history = [x for x in train]
predictions = list()
import warnings
warnings.filterwarnings("ignore")
for t in range(len(test)):
    model = SARIMAX(history, order=(1, 1, 1), seasonal_order=(1, 1, 1, 12))
    model_fit = model.fit(disp=False)
    output = model_fit.forecast()
    yhat = output[0]
    predictions.append(yhat)
    obs = test[t]  # observed value, kept for reference
    history.append(yhat)  # feed the forecast back in (recursive multi-step strategy)
history = [x for x in train]
plt.figure(figsize=(12, 8))
plt.plot(np.concatenate([history, predictions]), label="Prediction")
plt.plot(np.concatenate([history, test]), label="Test")
plt.title("prediction for player Id 282332 and target2")
plt.legend()
plt.show()
mean_squared_error(predictions, test)
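# A note on the rolling forecast above: the loop appends its own forecast (yhat) back into history, so later steps build on predicted rather than observed values. A common walk-forward-validation variant appends the observed value instead; a minimal sketch under the same SARIMAX settings (new variable names, not the notebook's original loop):
history_wf = [x for x in train]
predictions_wf = []
for t in range(len(test)):
    fit_wf = SARIMAX(history_wf, order=(1, 1, 1), seasonal_order=(1, 1, 1, 12)).fit(disp=False)
    predictions_wf.append(fit_wf.forecast()[0])  # one-step-ahead forecast
    history_wf.append(test[t])  # feed the observed value back in
print("walk-forward MSE:", mean_squared_error(predictions_wf, test))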
SelectFromModel from sklearn import metrics from keras.layers import LSTM from keras.layers import Dropout from keras.callbacks import EarlyStopping from statsmodels.tsa.seasonal import STL from math import sqrt import tensorflow as tf import keras from keras import layers from keras import Model from keras.layers import Dense from keras.models import Sequential from keras.layers import Input, Dense, concatenate df_nextDayPlayer["date"] = pd.to_datetime(df_nextDayPlayer["date"], format="%Y-%m-%d") df_nextDayPlayer = df_nextDayPlayer.set_index("date").to_period("D") print(df_nextDayPlayer.info()) # ## Starting to make some preprocessing to make a train data # ## and scaling values for supervised learning # convert series to supervised learning from pandas import concat from pandas import DataFrame def series_to_supervised(data, n_in=1, n_out=1, dropnan=True): n_vars = 1 if type(data) is list else data.shape[1] df = DataFrame(data) cols, names = list(), list() # input sequence (t-n, ... t-1) for i in range(n_in, 0, -1): cols.append(df.shift(i)) names += [("var%d(t-%d)" % (j + 1, i)) for j in range(n_vars)] # forecast sequence (t, t+1, ... t+n) for i in range(0, n_out): cols.append(df.shift(-i)) if i == 0: names += [("var%d(t)" % (j + 1)) for j in range(n_vars)] else: names += [("var%d(t+%d)" % (j + 1, i)) for j in range(n_vars)] # put it all together agg = concat(cols, axis=1) agg.columns = names # drop rows with NaN values if dropnan: agg.dropna(inplace=True) return agg df_nextDayPlayer["playerId"].nunique() cols = ["target1", "target2", "target3", "target4"] # using scaling for sampling the trainning set df_train = df_nextDayPlayer[cols].values scaler = MinMaxScaler(feature_range=(0, 1)) scaled = scaler.fit_transform(df_train) # frame as supervised learning reframed = series_to_supervised(scaled, 1, 1) # drop columns we don't want to predict reframed.drop(reframed.columns[[2, 3, 4, 5]], axis=1, inplace=True) print(reframed.head()) values = reframed.values seq_time = 365 * 2061 train = values[:seq_time, :] test = values[seq_time:, :] # split into input and outputs X_train, y_train = train[:, :-1], train[:, -1] X_test, y_test = test[:, :-1], test[:, -1] # reshape input to be [samples, timesteps, features] X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1])) X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1])) print(X_train.shape, y_train.shape, X_test.shape, y_test.shape) # ### Using LSTM model with inputs and outputs model = Sequential() model.add( LSTM( 100, input_shape=(X_train.shape[1], X_train.shape[2]), activation="relu", bias_initializer="zeros", return_sequences=True, ) ) model.add(Dropout(0.2)) model.add(LSTM(50, activation="relu", bias_initializer="zeros", return_sequences=True)) model.add(LSTM(10)) model.add(Dense(32, kernel_regularizer=tf.keras.regularizers.l2(0.01))) model.add(Dense(1, activation="sigmoid")) model.compile(optimizer="adam", loss="mean_squared_error", metrics="mae") history = model.fit( X_train, y_train, epochs=10, batch_size=32, validation_data=(X_test, y_test) ) # ### Test prediction and inverse # plot history plt.figure(figsize=(12, 8)) plt.plot(history.history["loss"], label="train") plt.plot(history.history["val_loss"], label="test") plt.legend() plt.show() # Evaluation of the model loss, mean_squared_error = model.evaluate(X_test, y_test, verbose=0) print("loss is:", loss) print("mean squared error is:", mean_squared_error) # save model model.save("model.h5") test_predict = model.predict(X_test) test_X = 
X_test.reshape((X_test.shape[0], X_test.shape[2])) # invert scaling for forecast inv_yhat = np.concatenate((test_predict, test_X[:, -4:]), axis=1) inv_yhat = scaler.inverse_transform(inv_yhat) inv_yhat = inv_yhat[:, 0] # invert scaling for actual y_test = y_test.reshape((len(y_test), 1)) inv_y = np.concatenate((y_test, test_X[:, -4:]), axis=1) inv_y = scaler.inverse_transform(inv_y) inv_y = inv_y[:, 0] plt.figure(figsize=(12, 8)) aa = [x for x in range(365)] plt.plot(aa, inv_y[:365], marker=".", label="actual") plt.plot(aa, inv_yhat[:365], "r", label="prediction") plt.ylabel("player activity", size=15) plt.xlabel("Time step", size=15) plt.legend(fontsize=15) plt.show() # ### to make the submission i use the example of notebook # https://www.kaggle.com/ruriarmandhani/mlb-submission # https://www.kaggle.com/ulrich07/mlb-debug-ann # model = keras.models.load_model("../input/modelplace/model .h5") # -----------------------------Next day Player-------------------------------------- with open("../input/pickle-data/df_nextDayPlayer.pkl", "rb") as handle: df_nextDayPlayer = pickle.load(handle) df_nextDayPlayer.rename(columns={"engagementMetricsDate": "date"}, inplace=True) df_nextDayPlayer["date"] = pd.to_datetime(df_nextDayPlayer["date"], format="%Y-%m-%d") # df_nextDayPlayer = df_nextDayPlayer.set_index('date').to_period('D') # -----------------------------------------df_test-------------------------------------------- df_test = pd.read_csv( "../input/mlb-player-digital-engagement-forecasting/example_sample_submission.csv" ) df_test = df_test.reset_index() df_test["date"] = pd.to_datetime(df_test["date"], format="%Y%m%d") df_test["playerId"] = ( df_test["date_playerId"].apply(lambda x: x.split("_")[1]).astype(int) ) # --------Sample median data-------- lag = 100 for x in range(lag): df_test["date"] = df_test["date"] - timedelta(days=1) df_test = df_test.merge( df_nextDayPlayer, how="left", on=["date", "playerId"], suffixes=["", f"_{x+1}"] ) df_test = df_test.fillna(0.0) for x in range(4): columns = [f"target{x+1}_{i+1}" for i in range(lag)] df_test[f"target{x+1}_median"] = df_test[columns].median(axis=1) df_test = df_test.drop(columns=columns) df_test targets_cols = [ "playerId", "target1_median", "target2_median", "target3_median", "target4_median", ] df_compose = df_test[targets_cols] df_compose = df_compose.rename( columns={ "target1_median": "target1", "target2_median": "target2", "target3_median": "target3", "target4_median": "target4", } ) df_compose # ### Sample data on mlb env import mlb env = mlb.make_env() # initialize the environment iter_test = env.iter_test() # iterator which loops over each date in test set lag = 100 targets_median = [ "target1_median", "target2_median", "target3_median", "target4_median", ] for test_df, sample_prediction_df in iter_test: # ----sample data--------------------------- df_train = df_test[targets_median].values scaler = MinMaxScaler(feature_range=(0, 1)) scaled = scaler.fit_transform(df_train) seq_time = 365 * 4 test = scaled[:seq_time, :] train = scaled[seq_time:, :] X_train, y_train = train[:, :-1], train[:, -1] X_test, y_test = test[:, :-1], test[:, -1] X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1])) X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1])) # ----make predict---- model = keras.models.load_model("../input/modelplace/model .h5") pred = model.predict(X_test) # ----sample for submission----- X_test = X_test.reshape((X_test.shape[0], X_test.shape[2])) pred = np.concatenate((pred, X_test[:, -4:]), axis=1) pred = 
pd.DataFrame(pred, columns=["target1", "target2", "target3", "target4"]) df_sample = df_compose.join(pred, lsuffix="_left") df_sample = df_sample.drop( columns=["target1_left", "target2_left", "target3_left", "target4_left"] ) # ------sample_prediction----- sample_prediction_df = sample_prediction_df.reset_index() sample_prediction_df["date"] = pd.to_datetime( sample_prediction_df["date"], format="%Y%m%d" ) sample_prediction_df["playerId"] = ( sample_prediction_df["date_playerId"] .apply(lambda x: x.split("_")[1]) .astype(int) ) sample_prediction_df = sample_prediction_df.drop( columns=["target1", "target2", "target3", "target4"] ) sample_prediction_df = pd.merge( sample_prediction_df, df_sample, how="outer", on="playerId" ) sample_prediction_df = sample_prediction_df.fillna(0.0) env.predict(sample_prediction_df) sample_prediction_df
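# ### Note on the inverse-scaling step above
# The MinMaxScaler in this notebook was fitted on all four target columns, so a
# one-column model prediction has to be padded back to four columns before
# scaler.inverse_transform can map it to the original units. The snippet below is a
# minimal, self-contained sketch of that trick on synthetic data; the array shapes,
# random values and the 0.9 factor are illustrative assumptions, not values taken
# from this notebook.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

rng = np.random.default_rng(42)
raw = rng.uniform(0.0, 100.0, size=(365, 4))  # stand-in for target1..target4
demo_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_demo = demo_scaler.fit_transform(raw)

pred_scaled = scaled_demo[:, 0:1] * 0.9  # pretend model output for the first target
padded = np.concatenate([pred_scaled, scaled_demo[:, 1:]], axis=1)  # restore 4 columns
pred_original_units = demo_scaler.inverse_transform(padded)[:, 0]
print(pred_original_units[:5])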
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/248/69248036.ipynb
null
null
[{"Id": 69248036, "ScriptId": 18741045, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6386129, "CreationDate": "07/28/2021 15:36:13", "VersionNumber": 28.0, "Title": "notebookce5f7e9cef", "EvaluationDate": "07/28/2021", "IsChange": false, "TotalLines": 521.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 521.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# ## MLB data and API for forecasting # Starting to get the data # From the documentation and exemple # https://www.kaggle.com/chumajin/eda-of-mlb-for-starter-english-ver # https://www.kaggle.com/ryanholbrook/getting-started-with-mlb-player-digital-engagement # -I build 2 pickles files from the train.csv file import numpy as np import pandas as pd import os from joblib import Parallel, delayed from pathlib import Path from datetime import datetime, timedelta import seaborn as sns import matplotlib.pyplot as plt from sklearn.metrics import mean_squared_error, mean_absolute_error from IPython.display import Image Image("../input/pictures/johnny_automatic_old_time_pitcher.jpg") import pickle5 as pickle with open("../input/pickle-data/df_nextDayPlayer.pkl", "rb") as handle: df_nextDayPlayer = pickle.load(handle) with open("../input/pickle-data/playerBoxScores.pkl", "rb") as handle: df_playerBoxScores = pickle.load(handle) df_playerBoxScores.head(n=2) df_nextDayPlayer.head(n=2) print(df_playerBoxScores.shape) print((df_nextDayPlayer.shape)) # --------------------------prepare data sets------------------------------- df_nextDayPlayer.nunique() # # Data Player Box Scores df_playerBoxScores.info() targets = ["target1", "target2", "target3", "target4"] features = [ "hits", "doubles", "triples", "strikeOuts", "homeRuns", "atBats", "runsScored", "stolenBases", "caughtStealing", "strikeOutsPitching", "inningsPitched", "strikes", "flyOuts", "groundOuts", "errors", "chances", "TotalBase", "BattingAvg", ] # ----------------------adding calcul columns---------------------------- # Adding Total bats columns df_playerBoxScores["TotalBase"] = ( df_playerBoxScores["hits"] + (2 * df_playerBoxScores["doubles"]) + (3 * df_playerBoxScores["triples"]) + (4 * df_playerBoxScores["homeRuns"]) ) # Adding Batting Average df_playerBoxScores["BattingAvg"] = ( df_playerBoxScores["hits"] / df_playerBoxScores["atBats"] ) df_playerBoxScores.shape # ----------prepare data player Box scores---------------------------------- df_playerBoxScores.rename(columns={"gameDate": "date"}, inplace=True) df_playerBoxScores = df_playerBoxScores[["date", "playerId"] + features] # Set dtypes df_playerBoxScores = df_playerBoxScores.astype({name: np.float32 for name in features}) df_playerBoxScores = df_playerBoxScores.astype({"playerId": str}) df_playerBoxScores.groupby(["date", "playerId"], as_index=False).sum() # # exploring data with some player Id # Data seasonalite---------------------------------------------------------------- season_Global_2020 = df_playerBoxScores[ df_playerBoxScores.date.between("2018-01-02", "2020-12-31") ] season_Global_2020 = season_Global_2020.sort_values(by="BattingAvg", ascending=False) season_Global_2020 = season_Global_2020.sort_values(by="date") season_Global_2020.groupby(["playerId", "date"], axis=0).sum() season_Global_2020_02 = season_Global_2020[season_Global_2020["playerId"] == "543105"] # ploting some performances fig, ax = plt.subplots(figsize=(24, 7)) ax.plot( season_Global_2020_02.date, season_Global_2020_02.BattingAvg, label="BattingAvg", fillstyle="full", linewidth=2, ) ax.plot( season_Global_2020_02.date, season_Global_2020_02.homeRuns, label="homeRuns", fillstyle="full", linewidth=2, ) plt.xlabel("date", labelpad=10) plt.title("playerId 543105") # plt.xticks(rotation=45) plt.xticks([]) plt.legend(frameon=False) plt.show() # ploting some performances fig, ax = plt.subplots(figsize=(24, 7)) ax.plot( season_Global_2020_02.date, season_Global_2020_02.runsScored, label="runsScored", fillstyle="full", 
linewidth=2, ) plt.xlabel("date", labelpad=10) plt.title("playerId 543105") # plt.xticks(rotation=45) plt.xticks([]) plt.legend(frameon=False) plt.show() # Next step is to prepare next day player to build a global data frame # Image("../input/pictures/johnny_automatic_baseball_at_bat.jpg") # # Data Next Day Player # ---------------Prepare next Day Player data------------------------------------------------------------------------------------ df_nextDayPlayer.rename(columns={"engagementMetricsDate": "date"}, inplace=True) df_nextDayPlayer = df_nextDayPlayer.astype({name: np.float32 for name in targets}) df_nextDayPlayer = df_nextDayPlayer.astype({"playerId": str}) df_nextDayPlayer = df_nextDayPlayer.groupby(["date", "playerId"], as_index=False).sum() df_nextDayPlayer.head() df_nextDayPlayer.iloc[:, 2] # ----------------------------------------------Data seasonalite---------------------------------------------------------------- season_nextdayPlayer_2020 = df_nextDayPlayer[ df_nextDayPlayer.date.between("2019-01-01", "2021-12-31") ] season_nextdayPlayer_2020 = season_nextdayPlayer_2020.sort_values( by="target1", ascending=False ) season_nextdayPlayer_2020 = season_nextdayPlayer_2020.sort_values(by="date") season_nextdayPlayer_02 = season_nextdayPlayer_2020[ (season_nextdayPlayer_2020["playerId"] == "543105") ] season_nextdayPlayer_03 = season_nextdayPlayer_2020[ (season_nextdayPlayer_2020["playerId"] == "282332") ] season_nextdayPlayer_04 = season_nextdayPlayer_2020[ (season_nextdayPlayer_2020["playerId"] == "516969") ] # # Understand the correlation between targets with differents player Id # Heatmap1 player Id : 543105 plt.figure(figsize=(17, 11)) # heatmap1 corr = season_nextdayPlayer_02.corr() cmap = sns.diverging_palette(230, 20, as_cmap=True) plt.subplot(1, 3, 1) sns.heatmap( corr, center=0, annot=True, square=True, linewidths=0.5, cbar_kws={"shrink": 0.5} ) plt.title("Heatmap player Id : 543105") # heatmap2 player Id : 282332 corr02 = season_nextdayPlayer_04.corr() plt.subplot(1, 3, 2) cmap = sns.diverging_palette(230, 20, as_cmap=True) sns.heatmap( corr02, center=0, annot=True, square=True, linewidths=0.5, cbar_kws={"shrink": 0.5} ) plt.title("Heatmap player Id : 282332") # heatmap3 player Id : 516969 corr03 = season_nextdayPlayer_03.corr() plt.subplot(1, 3, 3) cmap = sns.diverging_palette(230, 20, as_cmap=True) sns.heatmap( corr03, center=0, annot=True, square=True, linewidths=0.5, cbar_kws={"shrink": 0.5} ) plt.title("Heatmap player Id : 516969") plt.show() fig, ax = plt.subplots(figsize=(24, 7)) ax.plot( season_nextdayPlayer_02.date, season_nextdayPlayer_02.target1, label="target1", fillstyle="full", linewidth=2, ) ax.plot( season_nextdayPlayer_02.date, season_nextdayPlayer_02.target2, label="target2", fillstyle="full", linewidth=2, ) ax.plot( season_nextdayPlayer_02.date, season_nextdayPlayer_02.target3, label="target3", fillstyle="full", linewidth=2, ) ax.plot( season_nextdayPlayer_02.date, season_nextdayPlayer_02.target4, label="target4", fillstyle="full", linewidth=2, ) plt.xlabel("date", labelpad=10) plt.title("playerId 543105 and targets") plt.xticks([]) plt.legend(frameon=False) plt.show() season_nextdayPlayer_02["target1"][-30:].plot( figsize=(12, 4), label="target1", fillstyle="full", linewidth=2 ) season_nextdayPlayer_02["target2"][-30:].plot( figsize=(12, 4), label="target2", fillstyle="full", linewidth=2 ) season_nextdayPlayer_02["target3"][-30:].plot( figsize=(12, 4), label="target3", fillstyle="full", linewidth=2 ) season_nextdayPlayer_02["target4"][-30:].plot( 
figsize=(12, 4), label="target4", fillstyle="full", linewidth=2 ) plt.title("playerId 543105 and targets") plt.legend(frameon=False) plt.show() # Making some graph to understand the way he seasonality can be different for player or season from statsmodels.tsa.seasonal import STL # function for trend seasonal and resid plot def add_stl_plot(fig, res, legend): axs = fig.get_axes() comps = ["trend", "seasonal", "resid"] for ax, comp in zip(axs[1:], comps): series = getattr(res, comp) if comp == "resid": ax.plot(series, marker="o", linestyle="none") else: ax.plot(series) if comp == "trend": ax.legend(legend, frameon=False) from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() sns.set_style("darkgrid") plt.rc("figure", figsize=(16, 12)) plt.rc("font", size=13) plt.figure(figsize=(17, 11)) stl = STL(season_nextdayPlayer_02.target1, period=7, robust=True) res_robust = stl.fit() fig = res_robust.plot() res_non_robust = STL(season_nextdayPlayer_02.target1, period=7, robust=False).fit() add_stl_plot(fig, res_non_robust, ["Robust", "Non-robust"]) plt.title("playerId 543105 seasonal trend and resid") plt.show() from statsmodels.graphics.tsaplots import plot_acf, plot_pacf fig = plt.figure(figsize=(12, 8)) ax1 = fig.add_subplot(211) fig = plot_acf(season_nextdayPlayer_02["target2"][-395:], lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = plot_pacf(season_nextdayPlayer_02["target2"][-395:], lags=40, ax=ax2) # Using Regular season start and end date season_OnedayPlayer_2020 = df_nextDayPlayer[ df_nextDayPlayer.date.between("2021-02-28", "2021-10-31") ] # exeample for one player season_OnedayPlayer_2020 = season_OnedayPlayer_2020[ (season_OnedayPlayer_2020["playerId"] == "282332") ] season_OnedayPlayer_2020.isnull().sum() series = np.log(season_OnedayPlayer_2020["target2"]) series01 = np.log(season_OnedayPlayer_2020["target1"]) series03 = np.log(season_OnedayPlayer_2020["target3"]) series04 = np.log(season_OnedayPlayer_2020["target4"]) plt.figure(figsize=(12, 8)) plt.plot(series, label="Series target 2") plt.plot(series01, label="Series target 1") plt.plot(series03, label="Series target 3") plt.plot(series04, label="Series target 4") plt.legend() plt.show() # ## SARIMA for one player for season 2020 from statsmodels.tsa.statespace.sarimax import SARIMAX size = int(len(season_OnedayPlayer_2020["target2"].dropna()) * 0.75) train, test = ( season_OnedayPlayer_2020["target2"].dropna()[0:size], season_OnedayPlayer_2020["target2"].dropna()[ size : len(season_nextdayPlayer_02["target2"].dropna()) ], ) test = test.reset_index()["target2"] history = [x for x in train] predictions = list() import warnings warnings.filterwarnings("ignore") for t in range(len(test)): model = SARIMAX(history, order=(1, 1, 1), seasonal_order=(1, 1, 1, 12)) model_fit = model.fit(disp=False) output = model_fit.forecast() yhat = output[0] predictions.append(yhat) obs = test[t] history.append(yhat) history = [x for x in train] plt.figure(figsize=(12, 8)) plt.plot(np.concatenate([history, predictions]), label="Prediction") plt.plot(np.concatenate([history, test]), label="Test") plt.title("prediction for palyer Id 282332 and target2") plt.legend() plt.show() mean_squared_error(predictions, test) # # building a model with LSTM # # Using sequence of targets from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import cross_val_score from sklearn.feature_selection import 
SelectFromModel from sklearn import metrics from keras.layers import LSTM from keras.layers import Dropout from keras.callbacks import EarlyStopping from statsmodels.tsa.seasonal import STL from math import sqrt import tensorflow as tf import keras from keras import layers from keras import Model from keras.layers import Dense from keras.models import Sequential from keras.layers import Input, Dense, concatenate df_nextDayPlayer["date"] = pd.to_datetime(df_nextDayPlayer["date"], format="%Y-%m-%d") df_nextDayPlayer = df_nextDayPlayer.set_index("date").to_period("D") print(df_nextDayPlayer.info()) # ## Starting to make some preprocessing to make a train data # ## and scaling values for supervised learning # convert series to supervised learning from pandas import concat from pandas import DataFrame def series_to_supervised(data, n_in=1, n_out=1, dropnan=True): n_vars = 1 if type(data) is list else data.shape[1] df = DataFrame(data) cols, names = list(), list() # input sequence (t-n, ... t-1) for i in range(n_in, 0, -1): cols.append(df.shift(i)) names += [("var%d(t-%d)" % (j + 1, i)) for j in range(n_vars)] # forecast sequence (t, t+1, ... t+n) for i in range(0, n_out): cols.append(df.shift(-i)) if i == 0: names += [("var%d(t)" % (j + 1)) for j in range(n_vars)] else: names += [("var%d(t+%d)" % (j + 1, i)) for j in range(n_vars)] # put it all together agg = concat(cols, axis=1) agg.columns = names # drop rows with NaN values if dropnan: agg.dropna(inplace=True) return agg df_nextDayPlayer["playerId"].nunique() cols = ["target1", "target2", "target3", "target4"] # using scaling for sampling the trainning set df_train = df_nextDayPlayer[cols].values scaler = MinMaxScaler(feature_range=(0, 1)) scaled = scaler.fit_transform(df_train) # frame as supervised learning reframed = series_to_supervised(scaled, 1, 1) # drop columns we don't want to predict reframed.drop(reframed.columns[[2, 3, 4, 5]], axis=1, inplace=True) print(reframed.head()) values = reframed.values seq_time = 365 * 2061 train = values[:seq_time, :] test = values[seq_time:, :] # split into input and outputs X_train, y_train = train[:, :-1], train[:, -1] X_test, y_test = test[:, :-1], test[:, -1] # reshape input to be [samples, timesteps, features] X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1])) X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1])) print(X_train.shape, y_train.shape, X_test.shape, y_test.shape) # ### Using LSTM model with inputs and outputs model = Sequential() model.add( LSTM( 100, input_shape=(X_train.shape[1], X_train.shape[2]), activation="relu", bias_initializer="zeros", return_sequences=True, ) ) model.add(Dropout(0.2)) model.add(LSTM(50, activation="relu", bias_initializer="zeros", return_sequences=True)) model.add(LSTM(10)) model.add(Dense(32, kernel_regularizer=tf.keras.regularizers.l2(0.01))) model.add(Dense(1, activation="sigmoid")) model.compile(optimizer="adam", loss="mean_squared_error", metrics="mae") history = model.fit( X_train, y_train, epochs=10, batch_size=32, validation_data=(X_test, y_test) ) # ### Test prediction and inverse # plot history plt.figure(figsize=(12, 8)) plt.plot(history.history["loss"], label="train") plt.plot(history.history["val_loss"], label="test") plt.legend() plt.show() # Evaluation of the model loss, mean_squared_error = model.evaluate(X_test, y_test, verbose=0) print("loss is:", loss) print("mean squared error is:", mean_squared_error) # save model model.save("model.h5") test_predict = model.predict(X_test) test_X = 
X_test.reshape((X_test.shape[0], X_test.shape[2])) # invert scaling for forecast inv_yhat = np.concatenate((test_predict, test_X[:, -4:]), axis=1) inv_yhat = scaler.inverse_transform(inv_yhat) inv_yhat = inv_yhat[:, 0] # invert scaling for actual y_test = y_test.reshape((len(y_test), 1)) inv_y = np.concatenate((y_test, test_X[:, -4:]), axis=1) inv_y = scaler.inverse_transform(inv_y) inv_y = inv_y[:, 0] plt.figure(figsize=(12, 8)) aa = [x for x in range(365)] plt.plot(aa, inv_y[:365], marker=".", label="actual") plt.plot(aa, inv_yhat[:365], "r", label="prediction") plt.ylabel("player activity", size=15) plt.xlabel("Time step", size=15) plt.legend(fontsize=15) plt.show() # ### to make the submission i use the example of notebook # https://www.kaggle.com/ruriarmandhani/mlb-submission # https://www.kaggle.com/ulrich07/mlb-debug-ann # model = keras.models.load_model("../input/modelplace/model .h5") # -----------------------------Next day Player-------------------------------------- with open("../input/pickle-data/df_nextDayPlayer.pkl", "rb") as handle: df_nextDayPlayer = pickle.load(handle) df_nextDayPlayer.rename(columns={"engagementMetricsDate": "date"}, inplace=True) df_nextDayPlayer["date"] = pd.to_datetime(df_nextDayPlayer["date"], format="%Y-%m-%d") # df_nextDayPlayer = df_nextDayPlayer.set_index('date').to_period('D') # -----------------------------------------df_test-------------------------------------------- df_test = pd.read_csv( "../input/mlb-player-digital-engagement-forecasting/example_sample_submission.csv" ) df_test = df_test.reset_index() df_test["date"] = pd.to_datetime(df_test["date"], format="%Y%m%d") df_test["playerId"] = ( df_test["date_playerId"].apply(lambda x: x.split("_")[1]).astype(int) ) # --------Sample median data-------- lag = 100 for x in range(lag): df_test["date"] = df_test["date"] - timedelta(days=1) df_test = df_test.merge( df_nextDayPlayer, how="left", on=["date", "playerId"], suffixes=["", f"_{x+1}"] ) df_test = df_test.fillna(0.0) for x in range(4): columns = [f"target{x+1}_{i+1}" for i in range(lag)] df_test[f"target{x+1}_median"] = df_test[columns].median(axis=1) df_test = df_test.drop(columns=columns) df_test targets_cols = [ "playerId", "target1_median", "target2_median", "target3_median", "target4_median", ] df_compose = df_test[targets_cols] df_compose = df_compose.rename( columns={ "target1_median": "target1", "target2_median": "target2", "target3_median": "target3", "target4_median": "target4", } ) df_compose # ### Sample data on mlb env import mlb env = mlb.make_env() # initialize the environment iter_test = env.iter_test() # iterator which loops over each date in test set lag = 100 targets_median = [ "target1_median", "target2_median", "target3_median", "target4_median", ] for test_df, sample_prediction_df in iter_test: # ----sample data--------------------------- df_train = df_test[targets_median].values scaler = MinMaxScaler(feature_range=(0, 1)) scaled = scaler.fit_transform(df_train) seq_time = 365 * 4 test = scaled[:seq_time, :] train = scaled[seq_time:, :] X_train, y_train = train[:, :-1], train[:, -1] X_test, y_test = test[:, :-1], test[:, -1] X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1])) X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1])) # ----make predict---- model = keras.models.load_model("../input/modelplace/model .h5") pred = model.predict(X_test) # ----sample for submission----- X_test = X_test.reshape((X_test.shape[0], X_test.shape[2])) pred = np.concatenate((pred, X_test[:, -4:]), axis=1) pred = 
pd.DataFrame(pred, columns=["target1", "target2", "target3", "target4"]) df_sample = df_compose.join(pred, lsuffix="_left") df_sample = df_sample.drop( columns=["target1_left", "target2_left", "target3_left", "target4_left"] ) # ------sample_prediction----- sample_prediction_df = sample_prediction_df.reset_index() sample_prediction_df["date"] = pd.to_datetime( sample_prediction_df["date"], format="%Y%m%d" ) sample_prediction_df["playerId"] = ( sample_prediction_df["date_playerId"] .apply(lambda x: x.split("_")[1]) .astype(int) ) sample_prediction_df = sample_prediction_df.drop( columns=["target1", "target2", "target3", "target4"] ) sample_prediction_df = pd.merge( sample_prediction_df, df_sample, how="outer", on="playerId" ) sample_prediction_df = sample_prediction_df.fillna(0.0) env.predict(sample_prediction_df) sample_prediction_df
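# ### Illustration: what series_to_supervised produces for n_in=1, n_out=1
# The core of the helper defined above is a shift-and-concatenate; the tiny frame
# below (made-up numbers, illustrative only) shows the resulting var(t-1) / var(t)
# column layout that the LSTM is trained on.
import pandas as pd

toy = pd.DataFrame(
    {"target1": [1.0, 2.0, 3.0, 4.0], "target2": [10.0, 20.0, 30.0, 40.0]}
)
lagged = pd.concat(
    [toy.shift(1).add_suffix("(t-1)"), toy.add_suffix("(t)")], axis=1
).dropna()
print(lagged)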
false
0
6,714
0
6,714
6,714
69248267
from learntools.core import binder binder.bind(globals()) from learntools.python.ex4 import * print("Setup complete.") # # 1. # Complete the function below according to its docstring. def select_second(L): """Return the second element of the given list. If the list has no second element, return None. """ return L[1] if len(L) > 1 else None # Check your answer q1.check() # q1.hint() # q1.solution() # # 2. # You are analyzing sports teams. Members of each team are stored in a list. The Coach is the first name in the list, the captain is the second name in the list, and other players are listed after that. # These lists are stored in another list, which starts with the best team and proceeds through the list to the worst team last. Complete the function below to select the **captain** of the worst team. def losing_team_captain(teams): """Given a list of teams, where each team is a list of names, return the 2nd player (captain) from the last listed team """ return teams[-1][1] # Check your answer q2.check() # q2.hint() # q2.solution() # # 3. # The next iteration of Mario Kart will feature an extra-infuriating new item, the *Purple Shell*. When used, it warps the last place racer into first place and the first place racer into last place. Complete the function below to implement the Purple Shell's effect. def purple_shell(racers): """Given a list of racers, set the first place racer (at the front of the list) to last place and vice versa. >>> r = ["Mario", "Bowser", "Luigi"] >>> purple_shell(r) >>> r ["Luigi", "Bowser", "Mario"] """ racers[0], racers[-1] = racers[-1], racers[0] # Check your answer q3.check() # q3.hint() # q3.solution() # # 4. # What are the lengths of the following lists? Fill in the variable `lengths` with your predictions. (Try to make a prediction for each list *without* just calling `len()` on it.) a = [1, 2, 3] b = [1, [2, 3]] c = [] d = [1, 2, 3][1:] # Put your predictions in the list below. Lengths should contain 4 numbers, the # first being the length of a, the second being the length of b and so on. lengths = [3, 2, 0, 2] # Check your answer q4.check() # line below provides some explanation # q4.solution() # # 5. 🌶️ # We're using lists to record people who attended our party and what order they arrived in. For example, the following list represents a party with 7 guests, in which Adela showed up first and Ford was the last to arrive: # party_attendees = ['Adela', 'Fleda', 'Owen', 'May', 'Mona', 'Gilbert', 'Ford'] # A guest is considered 'fashionably late' if they arrived after at least half of the party's guests. However, they must not be the very last guest (that's taking it too far). In the above example, Mona and Gilbert are the only guests who were fashionably late. # Complete the function below which takes a list of party attendees as well as a person, and tells us whether that person is fashionably late. def fashionably_late(arrivals, name): """Given an ordered list of arrivals to the party and a name, return whether the guest with that name was fashionably late. 
""" # Find the Count of Guests attending the Party Count_of_Guests = len(arrivals) # Find the count of half of the guest Half_of_Guest = (Count_of_Guests // 2) + (0 if Count_of_Guests % 2 == 0 else 1) # Find the Guest's arrival position in the party Guest_arrv_pos = arrivals.index(name) + 1 # Check for the Guest's arrival with respect to others and print accordingly if Guest_arrv_pos > Half_of_Guest and Guest_arrv_pos < Count_of_Guests: return True else: return False # Check your answer q5.check() # q5.hint() # q5.solution()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/248/69248267.ipynb
null
null
[{"Id": 69248267, "ScriptId": 18884106, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7927593, "CreationDate": "07/28/2021 15:38:49", "VersionNumber": 1.0, "Title": "Exercise: Lists", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 129.0, "LinesInsertedFromPrevious": 18.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 111.0, "LinesInsertedFromFork": 18.0, "LinesDeletedFromFork": 5.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 111.0, "TotalVotes": 0}]
null
null
null
null
from learntools.core import binder binder.bind(globals()) from learntools.python.ex4 import * print("Setup complete.") # # 1. # Complete the function below according to its docstring. def select_second(L): """Return the second element of the given list. If the list has no second element, return None. """ return L[1] if len(L) > 1 else None # Check your answer q1.check() # q1.hint() # q1.solution() # # 2. # You are analyzing sports teams. Members of each team are stored in a list. The Coach is the first name in the list, the captain is the second name in the list, and other players are listed after that. # These lists are stored in another list, which starts with the best team and proceeds through the list to the worst team last. Complete the function below to select the **captain** of the worst team. def losing_team_captain(teams): """Given a list of teams, where each team is a list of names, return the 2nd player (captain) from the last listed team """ return teams[-1][1] # Check your answer q2.check() # q2.hint() # q2.solution() # # 3. # The next iteration of Mario Kart will feature an extra-infuriating new item, the *Purple Shell*. When used, it warps the last place racer into first place and the first place racer into last place. Complete the function below to implement the Purple Shell's effect. def purple_shell(racers): """Given a list of racers, set the first place racer (at the front of the list) to last place and vice versa. >>> r = ["Mario", "Bowser", "Luigi"] >>> purple_shell(r) >>> r ["Luigi", "Bowser", "Mario"] """ racers[0], racers[-1] = racers[-1], racers[0] # Check your answer q3.check() # q3.hint() # q3.solution() # # 4. # What are the lengths of the following lists? Fill in the variable `lengths` with your predictions. (Try to make a prediction for each list *without* just calling `len()` on it.) a = [1, 2, 3] b = [1, [2, 3]] c = [] d = [1, 2, 3][1:] # Put your predictions in the list below. Lengths should contain 4 numbers, the # first being the length of a, the second being the length of b and so on. lengths = [3, 2, 0, 2] # Check your answer q4.check() # line below provides some explanation # q4.solution() # # 5. 🌶️ # We're using lists to record people who attended our party and what order they arrived in. For example, the following list represents a party with 7 guests, in which Adela showed up first and Ford was the last to arrive: # party_attendees = ['Adela', 'Fleda', 'Owen', 'May', 'Mona', 'Gilbert', 'Ford'] # A guest is considered 'fashionably late' if they arrived after at least half of the party's guests. However, they must not be the very last guest (that's taking it too far). In the above example, Mona and Gilbert are the only guests who were fashionably late. # Complete the function below which takes a list of party attendees as well as a person, and tells us whether that person is fashionably late. def fashionably_late(arrivals, name): """Given an ordered list of arrivals to the party and a name, return whether the guest with that name was fashionably late. 
""" # Find the Count of Guests attending the Party Count_of_Guests = len(arrivals) # Find the count of half of the guest Half_of_Guest = (Count_of_Guests // 2) + (0 if Count_of_Guests % 2 == 0 else 1) # Find the Guest's arrival position in the party Guest_arrv_pos = arrivals.index(name) + 1 # Check for the Guest's arrival with respect to others and print accordingly if Guest_arrv_pos > Half_of_Guest and Guest_arrv_pos < Count_of_Guests: return True else: return False # Check your answer q5.check() # q5.hint() # q5.solution()
false
0
1,117
0
1,117
1,117
69248937
# This notebook work on using Voting ensemble technique to classify disaster tweets from non disaster tweets # The Classification steps: # * Preprocessing # * remove urls, stopwords, punctuations, and small words # * translate emojis # * Feature Extraction and Analysis # * use unigram to explore frequent words on each class # * use word cloud as explanatory analysis for unigram output # * use bigram to explore frequent bigrams on each class # * use bar graph to visualize bigram results # * apply unigram analysis on both location and keyword columns # * use bar graph to visualize unigram of location and keyword columns # * format feature vector for both training and testing data # * Model Implementation # * use Naive Bayes, Suppprt vector machine, K nearest neighbor, and logistic regression algorithms # * split training data (0.33 for testing) and test it on each algorithm # * apply cross validation on each algorithm # * use class report to evaluate each run # * apply voting ensemble technique on the four algorithms # * apply test feature vectors for each algorthim separately then on voting ensemble technique import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # load necessary libraries import nltk from nltk.corpus import stopwords import re, string import matplotlib.pyplot as plt from wordcloud import WordCloud from emoji import UNICODE_EMOJI import emoji from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.model_selection import train_test_split, cross_validate from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report # load train and test data to dataframes train_df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv", encoding="utf-8") test_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv", encoding="utf-8") # visulaize first top 10 columns of train data train_df.head(10) # visulaize first top 10 columns of test data test_df.head(10) # # Preprocessing # preprocessing train data # extract hashtags train_df["hashtags"] = train_df["text"].apply( lambda x: re.findall(r"#(\w+)", x.lower()) ) test_df["hashtags"] = test_df["text"].apply(lambda x: re.findall(r"#(\w+)", x.lower())) # translate emojis to text train_df["clean_text"] = train_df["text"].apply(lambda x: emoji.demojize(x)) test_df["clean_text"] = test_df["text"].apply(lambda x: emoji.demojize(x)) # length feature train_df["len_text"] = train_df["clean_text"].apply(lambda x: len(x.split())) test_df["len_text"] = test_df["clean_text"].apply(lambda x: len(x.split())) # remove urls train_df["clean_text"] = train_df["clean_text"].apply( lambda x: re.sub(r"http:\S+", "", x) ) train_df["clean_text"] = train_df["clean_text"].apply( lambda x: re.sub(r"https:\S+", "", x) ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: re.sub(r"http:\S+", "", x) ) 
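# Side note (illustrative only): the two scheme-specific URL substitutions used in this
# notebook can be collapsed into a single pattern covering both http and https; the
# sample tweet below is made up purely to show the effect.
import re

sample_tweet = "Forest fire near the lake http://t.co/abc https://t.co/xyz #wildfire"
print(re.sub(r"https?:\S+", "", sample_tweet))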
test_df["clean_text"] = test_df["clean_text"].apply( lambda x: re.sub(r"https:\S+", "", x) ) # tokenize tweets train_df["clean_text"] = train_df["clean_text"].apply( lambda x: nltk.word_tokenize(x.strip().lower()) ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: nltk.word_tokenize(x.strip().lower()) ) # remove punctuations from tweets train_df["clean_text"] = train_df["clean_text"].apply( lambda x: [re.sub(r"[" + string.punctuation + "]", "", y.strip()) for y in x] ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: [re.sub(r"[" + string.punctuation + "]", "", y.strip()) for y in x] ) # load stopwords set stopwrds = set(stopwords.words("english")) # remove stop words from tweets train_df["clean_text"] = train_df["clean_text"].apply( lambda x: [y for y in x if (y.strip() not in stopwrds)] ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: [y for y in x if (y.strip() not in stopwrds)] ) # remove new lines in tweets train_df["clean_text"] = train_df["clean_text"].apply( lambda x: [re.sub("\\n", "", y.strip()) for y in x] ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: [re.sub("\\n", "", y.strip()) for y in x] ) # remove spaces and small words from tweets train_df["clean_text"] = train_df["clean_text"].apply( lambda x: [y.strip() for y in x if (y.strip() != "") and len(y.strip()) > 2] ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: [y.strip() for y in x if (y.strip() != "") and len(y.strip()) > 2] ) # convert tokens of tweets to text train_df["clean_text"] = train_df["clean_text"].apply(lambda x: " ".join(x)) test_df["clean_text"] = test_df["clean_text"].apply(lambda x: " ".join(x)) # convert tokens hashtags to text train_df["hashtags"] = train_df["hashtags"].apply(lambda x: " ".join(x)) test_df["hashtags"] = test_df["hashtags"].apply(lambda x: " ".join(x)) # convert lower cases of keyword and location train_df["keyword"] = train_df["keyword"].apply( lambda x: x if str(x).lower() == "nan" else x.lower() ) train_df["location"] = train_df["location"].apply( lambda x: x if str(x).lower() == "nan" else x.lower() ) test_df["keyword"] = test_df["keyword"].apply( lambda x: x if str(x).lower() == "nan" else x.lower() ) test_df["location"] = test_df["location"].apply( lambda x: x if str(x).lower() == "nan" else x.lower() ) # visualize data train_df.head(10) test_df.head(10) # # Analysis # **Data Statistics** # total data length print("length of train data", len(train_df)) print("length of test data", len(test_df)) # unique location and keyword size of data print("Checking train location column values", len(train_df.location.unique())) print("Checking train keyword column values", len(train_df.keyword.unique())) print("Checking test location column values", len(test_df.location.unique())) print("Checking test keyword column values", len(test_df.keyword.unique())) # number of disaster tweets print("disaster tweets", len(train_df[train_df["target"] == 1])) print("non-disaster tweets", len(train_df[train_df["target"] == 0])) # **Graphical analysis** plt.subplots(1, 2, figsize=(10, 5)) # visualize top 20 train unique keywords plt.subplot(1, 2, 1) train_df.keyword.value_counts()[:20].plot(kind="bar", title="Unique Keywords") # visualize top 20 train unique locations plt.subplot(1, 2, 2) train_df.location.value_counts()[:20].plot(kind="bar", title="Unique Locations") plt.show() plt.subplots(1, 2, figsize=(10, 5)) # visualize top 20 disaster tweets and their keywords bar graph plt.subplot(1, 2, 1) train_df[train_df["target"] == 
1].keyword.value_counts()[:20].plot( kind="bar", title="Disaster tweets keywords" ) # visualize top 20 non disaster tweets and their keywords bar graph plt.subplot(1, 2, 2) train_df[train_df["target"] == 0].keyword.value_counts()[:20].plot( kind="bar", title="Non-Disaster tweets keywords" ) plt.show() plt.subplots(1, 2, figsize=(10, 5)) # visualize top 20 disaster tweets and their locations bar graph plt.subplot(1, 2, 1) train_df[train_df["target"] == 1].location.value_counts()[:20].plot( kind="bar", title="Disaster tweets Locations" ) # visualize top 20 non disaster tweets and their locations bar graph plt.subplot(1, 2, 2) train_df[train_df["target"] == 0].location.value_counts()[:20].plot( kind="bar", title="Non-Disaster tweets Locations" ) plt.show() import seaborn as sns sns.countplot(x="len_text", data=train_df[train_df["target"] == 1]) sns.countplot(x="len_text", data=train_df[train_df["target"] == 0]) # # Feature Extraction plt.subplots(1, 2, figsize=(15, 15)) plt.subplot(1, 2, 1) # Uigram Frequency distribution for disaster tweets # convert disaster tweets into single string txt = " ".join(train_df[train_df["target"] == 1]["clean_text"]) disaster_unigram = nltk.FreqDist(nltk.word_tokenize(txt)) # visualize unigram frequency distribution for disaster tweets using wordcloud disaster_wc = WordCloud(width=800, height=400, max_words=100).generate_from_frequencies( disaster_unigram ) plt.title("Disaster Unigram Frequency Distribution") plt.imshow(disaster_wc, interpolation="bilinear") plt.subplot(1, 2, 2) # Uigram Frequency distribution for non disaster tweets # convert non disaster tweets into single string txt = " ".join(train_df[train_df["target"] == 0]["clean_text"]) nondisaster_unigram = nltk.FreqDist(nltk.word_tokenize(txt)) # visualize unigram frequency distribution for non disaster tweets using wordcloud nondisaster_wc = WordCloud( width=800, height=400, max_words=100 ).generate_from_frequencies(nondisaster_unigram) plt.title("Non Disaster Unigram Frequency Distribution") plt.imshow(nondisaster_wc, interpolation="bilinear") plt.subplots(1, 2, figsize=(15, 10)) plt.subplot(1, 2, 1) # Bigram Frequency distribution for disaster tweets # convert disaster tweets into single string txt = " ".join(train_df[train_df["target"] == 1]["clean_text"]) disaster_bigram = nltk.FreqDist(nltk.bigrams(nltk.word_tokenize(txt))) tmplst = disaster_bigram.most_common(30) # visualize Bigram frequency distribution for disaster tweets using bar graph wrd, cnt = zip(*tmplst) wrd = [x + "," + y for (x, y) in wrd] plt.barh(wrd, cnt) plt.title("Disaster Bigram BarGraph") plt.subplot(1, 2, 2) # Bigram Frequency distribution for non disaster tweets # convert non disaster tweets into single string txt = " ".join(train_df[train_df["target"] == 0]["clean_text"]) nondisaster_bigram = nltk.FreqDist(nltk.bigrams(nltk.word_tokenize(txt))) tmplst = nondisaster_bigram.most_common(30) # visualize Bigram frequency distribution for non disaster tweets using bar graph wrd, cnt = zip(*tmplst) wrd = [x + "," + y for (x, y) in wrd] plt.barh(wrd, cnt) plt.title("Non Disaster Bigram BarGraph") plt.show() plt.subplots(1, 2, figsize=(15, 15)) plt.subplot(1, 2, 1) # Uigram Frequency distribution for disaster hashtags # convert disaster hashtags into single string txt = " ".join(train_df[train_df["target"] == 1]["hashtags"]) disaster_unigram_hash = nltk.FreqDist(nltk.word_tokenize(txt)) # visualize unigram frequency distribution for disaster hashtags using wordcloud disaster_wc = WordCloud(width=800, height=400, 
max_words=100).generate_from_frequencies( disaster_unigram_hash ) plt.title("Disaster Unigram Frequency Distribution hashtags") plt.imshow(disaster_wc, interpolation="bilinear") plt.subplot(1, 2, 2) # Uigram Frequency distribution for non disaster hashtags # convert non disaster hashtags into single string txt = " ".join(train_df[train_df["target"] == 0]["hashtags"]) nondisaster_unigram_hash = nltk.FreqDist(nltk.word_tokenize(txt)) # visualize unigram frequency distribution for non disaster hashtags using wordcloud nondisaster_wc = WordCloud( width=800, height=400, max_words=100 ).generate_from_frequencies(nondisaster_unigram_hash) plt.title("Non Disaster Unigram Frequency Distribution hashtags") plt.imshow(nondisaster_wc, interpolation="bilinear") # **Convert Tweet to train feature vector** # compute unigram feature vector for tweet likelihood to disaster train_df["unigram_disas"] = train_df["clean_text"].apply( lambda x: sum( [ disaster_unigram.get(wrd) for wrd in nltk.word_tokenize(x) if disaster_unigram.get(wrd) != None ] ) / len(disaster_unigram) ) # compute unigram feature vector for tweet likelihood to non disaster train_df["unigram_nondisas"] = train_df["clean_text"].apply( lambda x: sum( [ nondisaster_unigram.get(wrd) for wrd in nltk.word_tokenize(x) if nondisaster_unigram.get(wrd) != None ] ) / len(nondisaster_unigram) ) # compute unigram feature vector for hashtags likelihood to disaster train_df["unigram_disas_hash"] = train_df["hashtags"].apply( lambda x: sum( [ disaster_unigram_hash.get(wrd) for wrd in nltk.word_tokenize(x) if disaster_unigram_hash.get(wrd) != None ] ) / len(disaster_unigram_hash) ) # compute unigram feature vector for hashtags likelihood to non disaster train_df["unigram_nondisas_hash"] = train_df["hashtags"].apply( lambda x: sum( [ nondisaster_unigram_hash.get(wrd) for wrd in nltk.word_tokenize(x) if nondisaster_unigram_hash.get(wrd) != None ] ) / len(nondisaster_unigram_hash) ) # compute bigram feature vector for tweet likelihood to disaster train_df["bigram_disas"] = train_df["clean_text"].apply( lambda x: sum( [ disaster_bigram.get(wrd) for wrd in nltk.bigrams(nltk.word_tokenize(x)) if disaster_bigram.get(wrd) != None ] ) / len(disaster_bigram) ) # compute bigram feature vector for tweet likelihood to non disaster train_df["bigram_nondisas"] = train_df["clean_text"].apply( lambda x: sum( [ nondisaster_bigram.get(wrd) for wrd in nltk.bigrams(nltk.word_tokenize(x)) if nondisaster_bigram.get(wrd) != None ] ) / len(nondisaster_bigram) ) key_disas = nltk.FreqDist(train_df[train_df["target"] == 1]["keyword"]) # compute unigram keyword to disaster train_df["key_disas"] = train_df["keyword"].apply( lambda x: sum( [key_disas.get(x) if (x in key_disas.keys() and str(x).lower() != "nan") else 0] ) / len(key_disas) ) key_nondisas = nltk.FreqDist(train_df[train_df["target"] == 0]["keyword"]) # compute unigram keyword to non disaster train_df["key_nondisas"] = train_df["keyword"].apply( lambda x: sum( [ key_nondisas.get(x) if (x in key_nondisas.keys() and str(x).lower() != "nan") else 0 ] ) / len(key_nondisas) ) loc_disas = nltk.FreqDist(train_df[train_df["target"] == 1]["location"]) # compute unigram location to disaster train_df["loc_disas"] = train_df["location"].apply( lambda x: sum( [loc_disas.get(x) if (x in loc_disas.keys() and str(x).lower() != "nan") else 0] ) / len(loc_disas) ) loc_nondisas = nltk.FreqDist(train_df[train_df["target"] == 0]["location"]) # compute unigram location to non disaster train_df["loc_nondisas"] = train_df["location"].apply( lambda x: 
sum( [ loc_nondisas.get(x) if (x in loc_nondisas.keys() and str(x).lower() != "nan") else 0 ] ) / len(loc_nondisas) ) train_df.head(5) # define feature vectors for training dataset train_feature_vectors = train_df[ [ "unigram_disas", "unigram_nondisas", "unigram_disas_hash", "unigram_nondisas_hash", "bigram_disas", "bigram_nondisas", "key_disas", "key_nondisas", "loc_disas", "loc_nondisas", ] ] train_feature_vectors.head(5) # **Convert Tweet to test feature vector** # compute unigram feature vector for tweet likelihood to disaster test_df["unigram_disas"] = test_df["clean_text"].apply( lambda x: sum( [ disaster_unigram.get(wrd) for wrd in nltk.word_tokenize(x) if disaster_unigram.get(wrd) != None ] ) / len(disaster_unigram) ) # compute unigram feature vector for tweet likelihood to non disaster test_df["unigram_nondisas"] = test_df["clean_text"].apply( lambda x: sum( [ nondisaster_unigram.get(wrd) for wrd in nltk.word_tokenize(x) if nondisaster_unigram.get(wrd) != None ] ) / len(nondisaster_unigram) ) # compute unigram feature vector for hashtags likelihood to disaster test_df["unigram_disas_hash"] = test_df["hashtags"].apply( lambda x: sum( [ disaster_unigram_hash.get(wrd) for wrd in nltk.word_tokenize(x) if disaster_unigram_hash.get(wrd) != None ] ) / len(disaster_unigram_hash) ) # compute unigram feature vector for hashtags likelihood to non disaster test_df["unigram_nondisas_hash"] = test_df["hashtags"].apply( lambda x: sum( [ nondisaster_unigram_hash.get(wrd) for wrd in nltk.word_tokenize(x) if nondisaster_unigram_hash.get(wrd) != None ] ) / len(nondisaster_unigram_hash) ) # compute bigram feature vector for tweet likelihood to disaster test_df["bigram_disas"] = test_df["clean_text"].apply( lambda x: sum( [ disaster_bigram.get(wrd) for wrd in nltk.bigrams(nltk.word_tokenize(x)) if disaster_bigram.get(wrd) != None ] ) / len(disaster_bigram) if x.strip() != "" else 0 ) # compute bigram feature vector for tweet likelihood to non disaster test_df["bigram_nondisas"] = test_df["clean_text"].apply( lambda x: sum( [ nondisaster_bigram.get(wrd) for wrd in nltk.bigrams(nltk.word_tokenize(x)) if nondisaster_bigram.get(wrd) != None ] ) / len(nondisaster_bigram) if x.strip() != "" else 0 ) # compute unigram keyword to disaster test_df["key_disas"] = test_df["keyword"].apply( lambda x: sum( [key_disas.get(x) if (x in key_disas.keys() and str(x).lower() != "nan") else 0] ) / len(key_disas) ) # compute unigram keyword to non disaster test_df["key_nondisas"] = test_df["keyword"].apply( lambda x: sum( [ key_nondisas.get(x) if (x in key_nondisas.keys() and str(x).lower() != "nan") else 0 ] ) / len(key_nondisas) ) # compute unigram location to disaster test_df["loc_disas"] = test_df["location"].apply( lambda x: sum( [loc_disas.get(x) if (x in loc_disas.keys() and str(x).lower() != "nan") else 0] ) / len(loc_disas) ) # compute unigram location to non disaster test_df["loc_nondisas"] = test_df["location"].apply( lambda x: sum( [ loc_nondisas.get(x) if (x in loc_nondisas.keys() and str(x).lower() != "nan") else 0 ] ) / len(loc_nondisas) ) # define feature vectors for testing dataset test_feature_vectors = test_df[ [ "unigram_disas", "unigram_nondisas", "unigram_disas_hash", "unigram_nondisas_hash", "bigram_disas", "bigram_nondisas", "key_disas", "key_nondisas", "loc_disas", "loc_nondisas", ] ] test_feature_vectors.head(5) # # Model Building & Training # split train data Y = train_df["target"] X_train, X_test, y_train, y_test = train_test_split( train_feature_vectors, Y, test_size=0.33, random_state=42 ) # 
Naive Bayes Classifier nb_clf = GaussianNB() # train classifier on train data after splitting nb_clf.fit(X_train, y_train) print(nb_clf.get_params()) print("split training score", nb_clf.score(X_test, y_test)) # train over all trained data applying cross validation print( "NB cross validation scores", cross_validate(nb_clf, train_feature_vectors, Y, cv=5) ) print(classification_report(Y, nb_clf.predict(train_feature_vectors))) # Support Vector Machine svm_clf = SVC(probability=True) # train classifier on train data after splitting svm_clf.fit(X_train, y_train) print(svm_clf.get_params()) print("split training score", svm_clf.score(X_test, y_test)) # train over all trained data applying cross validation print( "SVM cross validation scores", cross_validate(svm_clf, train_feature_vectors, Y, cv=5), ) print(classification_report(Y, svm_clf.predict(train_feature_vectors))) # K nearest neighbor knn_clf = KNeighborsClassifier() # train classifier on train data after splitting knn_clf.fit(X_train, y_train) print(knn_clf.get_params()) print("split training score", knn_clf.score(X_test, y_test)) # train over all trained data applying cross validation print( "KNN cross validation scores", cross_validate(knn_clf, train_feature_vectors, Y, cv=5), ) print(classification_report(Y, knn_clf.predict(train_feature_vectors))) # Logistic Regression logReg_clf = LogisticRegression() # train classifier on train data after splitting logReg_clf.fit(X_train, y_train) print(logReg_clf.get_params()) print("split training score", logReg_clf.score(X_test, y_test)) # train over all trained data applying cross validation print( "LR cross validation score", cross_validate(logReg_clf, train_feature_vectors, Y, cv=5), ) print(classification_report(Y, logReg_clf.predict(train_feature_vectors))) # # Model Testing # Naive Bayes Model on test features nb_clf.predict(test_feature_vectors) # SVM Model on test features svm_clf.predict(test_feature_vectors) # KNN Model on test features knn_clf.predict(test_feature_vectors) # Logistic regression model on test features logReg_clf.predict(test_feature_vectors) # # Voting Classifier on NB,SVM,LR,and KNN models from sklearn.ensemble import VotingClassifier voting_clf = VotingClassifier( estimators=[ ("NB", nb_clf), ("SVM", svm_clf), ("KNN", knn_clf), ("LogReg", logReg_clf), ], voting="soft", ) # train over all trained data applying cross validation voting_clf.fit(train_feature_vectors, Y) print("Voting score", voting_clf.score(train_feature_vectors, Y)) print(classification_report(Y, voting_clf.predict(train_feature_vectors))) # testing model on test feature vectors vals = voting_clf.predict(test_feature_vectors) # save in submission dataframe submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv") submission["id"] = test_df["id"] submission["target"] = vals submission.head(10) submission.to_csv("sample_submission.csv", index=False)
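# ### Note on voting="soft"
# To make the soft-voting step above more concrete: with voting="soft" (and no weights),
# VotingClassifier averages each member's predict_proba output and takes the argmax.
# The sketch below checks that equivalence on a small synthetic dataset; the estimator
# subset and toy data are illustrative assumptions, not the competition features.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

X_toy, y_toy = make_classification(n_samples=200, n_features=10, random_state=0)
toy_estimators = [
    ("nb", GaussianNB()),
    ("knn", KNeighborsClassifier()),
    ("lr", LogisticRegression(max_iter=1000)),
]
toy_vote = VotingClassifier(estimators=toy_estimators, voting="soft").fit(X_toy, y_toy)

# manual soft vote: average the class probabilities, then argmax
avg_proba = np.mean(
    [clf.fit(X_toy, y_toy).predict_proba(X_toy) for _, clf in toy_estimators], axis=0
)
manual_pred = avg_proba.argmax(axis=1)
print("agreement with VotingClassifier:", (manual_pred == toy_vote.predict(X_toy)).mean())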
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/248/69248937.ipynb
null
null
[{"Id": 69248937, "ScriptId": 13987154, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 811292, "CreationDate": "07/28/2021 15:48:15", "VersionNumber": 19.0, "Title": "Disaster Tweets classification", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 429.0, "LinesInsertedFromPrevious": 7.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 422.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# This notebook work on using Voting ensemble technique to classify disaster tweets from non disaster tweets # The Classification steps: # * Preprocessing # * remove urls, stopwords, punctuations, and small words # * translate emojis # * Feature Extraction and Analysis # * use unigram to explore frequent words on each class # * use word cloud as explanatory analysis for unigram output # * use bigram to explore frequent bigrams on each class # * use bar graph to visualize bigram results # * apply unigram analysis on both location and keyword columns # * use bar graph to visualize unigram of location and keyword columns # * format feature vector for both training and testing data # * Model Implementation # * use Naive Bayes, Suppprt vector machine, K nearest neighbor, and logistic regression algorithms # * split training data (0.33 for testing) and test it on each algorithm # * apply cross validation on each algorithm # * use class report to evaluate each run # * apply voting ensemble technique on the four algorithms # * apply test feature vectors for each algorthim separately then on voting ensemble technique import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # load necessary libraries import nltk from nltk.corpus import stopwords import re, string import matplotlib.pyplot as plt from wordcloud import WordCloud from emoji import UNICODE_EMOJI import emoji from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.model_selection import train_test_split, cross_validate from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report # load train and test data to dataframes train_df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv", encoding="utf-8") test_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv", encoding="utf-8") # visulaize first top 10 columns of train data train_df.head(10) # visulaize first top 10 columns of test data test_df.head(10) # # Preprocessing # preprocessing train data # extract hashtags train_df["hashtags"] = train_df["text"].apply( lambda x: re.findall(r"#(\w+)", x.lower()) ) test_df["hashtags"] = test_df["text"].apply(lambda x: re.findall(r"#(\w+)", x.lower())) # translate emojis to text train_df["clean_text"] = train_df["text"].apply(lambda x: emoji.demojize(x)) test_df["clean_text"] = test_df["text"].apply(lambda x: emoji.demojize(x)) # length feature train_df["len_text"] = train_df["clean_text"].apply(lambda x: len(x.split())) test_df["len_text"] = test_df["clean_text"].apply(lambda x: len(x.split())) # remove urls train_df["clean_text"] = train_df["clean_text"].apply( lambda x: re.sub(r"http:\S+", "", x) ) train_df["clean_text"] = train_df["clean_text"].apply( lambda x: re.sub(r"https:\S+", "", x) ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: re.sub(r"http:\S+", "", x) ) 
test_df["clean_text"] = test_df["clean_text"].apply( lambda x: re.sub(r"https:\S+", "", x) ) # tokenize tweets train_df["clean_text"] = train_df["clean_text"].apply( lambda x: nltk.word_tokenize(x.strip().lower()) ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: nltk.word_tokenize(x.strip().lower()) ) # remove punctuations from tweets train_df["clean_text"] = train_df["clean_text"].apply( lambda x: [re.sub(r"[" + string.punctuation + "]", "", y.strip()) for y in x] ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: [re.sub(r"[" + string.punctuation + "]", "", y.strip()) for y in x] ) # load stopwords set stopwrds = set(stopwords.words("english")) # remove stop words from tweets train_df["clean_text"] = train_df["clean_text"].apply( lambda x: [y for y in x if (y.strip() not in stopwrds)] ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: [y for y in x if (y.strip() not in stopwrds)] ) # remove new lines in tweets train_df["clean_text"] = train_df["clean_text"].apply( lambda x: [re.sub("\\n", "", y.strip()) for y in x] ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: [re.sub("\\n", "", y.strip()) for y in x] ) # remove spaces and small words from tweets train_df["clean_text"] = train_df["clean_text"].apply( lambda x: [y.strip() for y in x if (y.strip() != "") and len(y.strip()) > 2] ) test_df["clean_text"] = test_df["clean_text"].apply( lambda x: [y.strip() for y in x if (y.strip() != "") and len(y.strip()) > 2] ) # convert tokens of tweets to text train_df["clean_text"] = train_df["clean_text"].apply(lambda x: " ".join(x)) test_df["clean_text"] = test_df["clean_text"].apply(lambda x: " ".join(x)) # convert tokens hashtags to text train_df["hashtags"] = train_df["hashtags"].apply(lambda x: " ".join(x)) test_df["hashtags"] = test_df["hashtags"].apply(lambda x: " ".join(x)) # convert lower cases of keyword and location train_df["keyword"] = train_df["keyword"].apply( lambda x: x if str(x).lower() == "nan" else x.lower() ) train_df["location"] = train_df["location"].apply( lambda x: x if str(x).lower() == "nan" else x.lower() ) test_df["keyword"] = test_df["keyword"].apply( lambda x: x if str(x).lower() == "nan" else x.lower() ) test_df["location"] = test_df["location"].apply( lambda x: x if str(x).lower() == "nan" else x.lower() ) # visualize data train_df.head(10) test_df.head(10) # # Analysis # **Data Statistics** # total data length print("length of train data", len(train_df)) print("length of test data", len(test_df)) # unique location and keyword size of data print("Checking train location column values", len(train_df.location.unique())) print("Checking train keyword column values", len(train_df.keyword.unique())) print("Checking test location column values", len(test_df.location.unique())) print("Checking test keyword column values", len(test_df.keyword.unique())) # number of disaster tweets print("disaster tweets", len(train_df[train_df["target"] == 1])) print("non-disaster tweets", len(train_df[train_df["target"] == 0])) # **Graphical analysis** plt.subplots(1, 2, figsize=(10, 5)) # visualize top 20 train unique keywords plt.subplot(1, 2, 1) train_df.keyword.value_counts()[:20].plot(kind="bar", title="Unique Keywords") # visualize top 20 train unique locations plt.subplot(1, 2, 2) train_df.location.value_counts()[:20].plot(kind="bar", title="Unique Locations") plt.show() plt.subplots(1, 2, figsize=(10, 5)) # visualize top 20 disaster tweets and their keywords bar graph plt.subplot(1, 2, 1) train_df[train_df["target"] == 
1].keyword.value_counts()[:20].plot( kind="bar", title="Disaster tweets keywords" ) # visualize top 20 non disaster tweets and their keywords bar graph plt.subplot(1, 2, 2) train_df[train_df["target"] == 0].keyword.value_counts()[:20].plot( kind="bar", title="Non-Disaster tweets keywords" ) plt.show() plt.subplots(1, 2, figsize=(10, 5)) # visualize top 20 disaster tweets and their locations bar graph plt.subplot(1, 2, 1) train_df[train_df["target"] == 1].location.value_counts()[:20].plot( kind="bar", title="Disaster tweets Locations" ) # visualize top 20 non disaster tweets and their locations bar graph plt.subplot(1, 2, 2) train_df[train_df["target"] == 0].location.value_counts()[:20].plot( kind="bar", title="Non-Disaster tweets Locations" ) plt.show() import seaborn as sns sns.countplot(x="len_text", data=train_df[train_df["target"] == 1]) sns.countplot(x="len_text", data=train_df[train_df["target"] == 0]) # # Feature Extraction plt.subplots(1, 2, figsize=(15, 15)) plt.subplot(1, 2, 1) # Uigram Frequency distribution for disaster tweets # convert disaster tweets into single string txt = " ".join(train_df[train_df["target"] == 1]["clean_text"]) disaster_unigram = nltk.FreqDist(nltk.word_tokenize(txt)) # visualize unigram frequency distribution for disaster tweets using wordcloud disaster_wc = WordCloud(width=800, height=400, max_words=100).generate_from_frequencies( disaster_unigram ) plt.title("Disaster Unigram Frequency Distribution") plt.imshow(disaster_wc, interpolation="bilinear") plt.subplot(1, 2, 2) # Uigram Frequency distribution for non disaster tweets # convert non disaster tweets into single string txt = " ".join(train_df[train_df["target"] == 0]["clean_text"]) nondisaster_unigram = nltk.FreqDist(nltk.word_tokenize(txt)) # visualize unigram frequency distribution for non disaster tweets using wordcloud nondisaster_wc = WordCloud( width=800, height=400, max_words=100 ).generate_from_frequencies(nondisaster_unigram) plt.title("Non Disaster Unigram Frequency Distribution") plt.imshow(nondisaster_wc, interpolation="bilinear") plt.subplots(1, 2, figsize=(15, 10)) plt.subplot(1, 2, 1) # Bigram Frequency distribution for disaster tweets # convert disaster tweets into single string txt = " ".join(train_df[train_df["target"] == 1]["clean_text"]) disaster_bigram = nltk.FreqDist(nltk.bigrams(nltk.word_tokenize(txt))) tmplst = disaster_bigram.most_common(30) # visualize Bigram frequency distribution for disaster tweets using bar graph wrd, cnt = zip(*tmplst) wrd = [x + "," + y for (x, y) in wrd] plt.barh(wrd, cnt) plt.title("Disaster Bigram BarGraph") plt.subplot(1, 2, 2) # Bigram Frequency distribution for non disaster tweets # convert non disaster tweets into single string txt = " ".join(train_df[train_df["target"] == 0]["clean_text"]) nondisaster_bigram = nltk.FreqDist(nltk.bigrams(nltk.word_tokenize(txt))) tmplst = nondisaster_bigram.most_common(30) # visualize Bigram frequency distribution for non disaster tweets using bar graph wrd, cnt = zip(*tmplst) wrd = [x + "," + y for (x, y) in wrd] plt.barh(wrd, cnt) plt.title("Non Disaster Bigram BarGraph") plt.show() plt.subplots(1, 2, figsize=(15, 15)) plt.subplot(1, 2, 1) # Uigram Frequency distribution for disaster hashtags # convert disaster hashtags into single string txt = " ".join(train_df[train_df["target"] == 1]["hashtags"]) disaster_unigram_hash = nltk.FreqDist(nltk.word_tokenize(txt)) # visualize unigram frequency distribution for disaster hashtags using wordcloud disaster_wc = WordCloud(width=800, height=400, 
max_words=100).generate_from_frequencies( disaster_unigram_hash ) plt.title("Disaster Unigram Frequency Distribution hashtags") plt.imshow(disaster_wc, interpolation="bilinear") plt.subplot(1, 2, 2) # Uigram Frequency distribution for non disaster hashtags # convert non disaster hashtags into single string txt = " ".join(train_df[train_df["target"] == 0]["hashtags"]) nondisaster_unigram_hash = nltk.FreqDist(nltk.word_tokenize(txt)) # visualize unigram frequency distribution for non disaster hashtags using wordcloud nondisaster_wc = WordCloud( width=800, height=400, max_words=100 ).generate_from_frequencies(nondisaster_unigram_hash) plt.title("Non Disaster Unigram Frequency Distribution hashtags") plt.imshow(nondisaster_wc, interpolation="bilinear") # **Convert Tweet to train feature vector** # compute unigram feature vector for tweet likelihood to disaster train_df["unigram_disas"] = train_df["clean_text"].apply( lambda x: sum( [ disaster_unigram.get(wrd) for wrd in nltk.word_tokenize(x) if disaster_unigram.get(wrd) != None ] ) / len(disaster_unigram) ) # compute unigram feature vector for tweet likelihood to non disaster train_df["unigram_nondisas"] = train_df["clean_text"].apply( lambda x: sum( [ nondisaster_unigram.get(wrd) for wrd in nltk.word_tokenize(x) if nondisaster_unigram.get(wrd) != None ] ) / len(nondisaster_unigram) ) # compute unigram feature vector for hashtags likelihood to disaster train_df["unigram_disas_hash"] = train_df["hashtags"].apply( lambda x: sum( [ disaster_unigram_hash.get(wrd) for wrd in nltk.word_tokenize(x) if disaster_unigram_hash.get(wrd) != None ] ) / len(disaster_unigram_hash) ) # compute unigram feature vector for hashtags likelihood to non disaster train_df["unigram_nondisas_hash"] = train_df["hashtags"].apply( lambda x: sum( [ nondisaster_unigram_hash.get(wrd) for wrd in nltk.word_tokenize(x) if nondisaster_unigram_hash.get(wrd) != None ] ) / len(nondisaster_unigram_hash) ) # compute bigram feature vector for tweet likelihood to disaster train_df["bigram_disas"] = train_df["clean_text"].apply( lambda x: sum( [ disaster_bigram.get(wrd) for wrd in nltk.bigrams(nltk.word_tokenize(x)) if disaster_bigram.get(wrd) != None ] ) / len(disaster_bigram) ) # compute bigram feature vector for tweet likelihood to non disaster train_df["bigram_nondisas"] = train_df["clean_text"].apply( lambda x: sum( [ nondisaster_bigram.get(wrd) for wrd in nltk.bigrams(nltk.word_tokenize(x)) if nondisaster_bigram.get(wrd) != None ] ) / len(nondisaster_bigram) ) key_disas = nltk.FreqDist(train_df[train_df["target"] == 1]["keyword"]) # compute unigram keyword to disaster train_df["key_disas"] = train_df["keyword"].apply( lambda x: sum( [key_disas.get(x) if (x in key_disas.keys() and str(x).lower() != "nan") else 0] ) / len(key_disas) ) key_nondisas = nltk.FreqDist(train_df[train_df["target"] == 0]["keyword"]) # compute unigram keyword to non disaster train_df["key_nondisas"] = train_df["keyword"].apply( lambda x: sum( [ key_nondisas.get(x) if (x in key_nondisas.keys() and str(x).lower() != "nan") else 0 ] ) / len(key_nondisas) ) loc_disas = nltk.FreqDist(train_df[train_df["target"] == 1]["location"]) # compute unigram location to disaster train_df["loc_disas"] = train_df["location"].apply( lambda x: sum( [loc_disas.get(x) if (x in loc_disas.keys() and str(x).lower() != "nan") else 0] ) / len(loc_disas) ) loc_nondisas = nltk.FreqDist(train_df[train_df["target"] == 0]["location"]) # compute unigram location to non disaster train_df["loc_nondisas"] = train_df["location"].apply( lambda x: 
sum( [ loc_nondisas.get(x) if (x in loc_nondisas.keys() and str(x).lower() != "nan") else 0 ] ) / len(loc_nondisas) ) train_df.head(5) # define feature vectors for training dataset train_feature_vectors = train_df[ [ "unigram_disas", "unigram_nondisas", "unigram_disas_hash", "unigram_nondisas_hash", "bigram_disas", "bigram_nondisas", "key_disas", "key_nondisas", "loc_disas", "loc_nondisas", ] ] train_feature_vectors.head(5) # **Convert Tweet to test feature vector** # compute unigram feature vector for tweet likelihood to disaster test_df["unigram_disas"] = test_df["clean_text"].apply( lambda x: sum( [ disaster_unigram.get(wrd) for wrd in nltk.word_tokenize(x) if disaster_unigram.get(wrd) != None ] ) / len(disaster_unigram) ) # compute unigram feature vector for tweet likelihood to non disaster test_df["unigram_nondisas"] = test_df["clean_text"].apply( lambda x: sum( [ nondisaster_unigram.get(wrd) for wrd in nltk.word_tokenize(x) if nondisaster_unigram.get(wrd) != None ] ) / len(nondisaster_unigram) ) # compute unigram feature vector for hashtags likelihood to disaster test_df["unigram_disas_hash"] = test_df["hashtags"].apply( lambda x: sum( [ disaster_unigram_hash.get(wrd) for wrd in nltk.word_tokenize(x) if disaster_unigram_hash.get(wrd) != None ] ) / len(disaster_unigram_hash) ) # compute unigram feature vector for hashtags likelihood to non disaster test_df["unigram_nondisas_hash"] = test_df["hashtags"].apply( lambda x: sum( [ nondisaster_unigram_hash.get(wrd) for wrd in nltk.word_tokenize(x) if nondisaster_unigram_hash.get(wrd) != None ] ) / len(nondisaster_unigram_hash) ) # compute bigram feature vector for tweet likelihood to disaster test_df["bigram_disas"] = test_df["clean_text"].apply( lambda x: sum( [ disaster_bigram.get(wrd) for wrd in nltk.bigrams(nltk.word_tokenize(x)) if disaster_bigram.get(wrd) != None ] ) / len(disaster_bigram) if x.strip() != "" else 0 ) # compute bigram feature vector for tweet likelihood to non disaster test_df["bigram_nondisas"] = test_df["clean_text"].apply( lambda x: sum( [ nondisaster_bigram.get(wrd) for wrd in nltk.bigrams(nltk.word_tokenize(x)) if nondisaster_bigram.get(wrd) != None ] ) / len(nondisaster_bigram) if x.strip() != "" else 0 ) # compute unigram keyword to disaster test_df["key_disas"] = test_df["keyword"].apply( lambda x: sum( [key_disas.get(x) if (x in key_disas.keys() and str(x).lower() != "nan") else 0] ) / len(key_disas) ) # compute unigram keyword to non disaster test_df["key_nondisas"] = test_df["keyword"].apply( lambda x: sum( [ key_nondisas.get(x) if (x in key_nondisas.keys() and str(x).lower() != "nan") else 0 ] ) / len(key_nondisas) ) # compute unigram location to disaster test_df["loc_disas"] = test_df["location"].apply( lambda x: sum( [loc_disas.get(x) if (x in loc_disas.keys() and str(x).lower() != "nan") else 0] ) / len(loc_disas) ) # compute unigram location to non disaster test_df["loc_nondisas"] = test_df["location"].apply( lambda x: sum( [ loc_nondisas.get(x) if (x in loc_nondisas.keys() and str(x).lower() != "nan") else 0 ] ) / len(loc_nondisas) ) # define feature vectors for testing dataset test_feature_vectors = test_df[ [ "unigram_disas", "unigram_nondisas", "unigram_disas_hash", "unigram_nondisas_hash", "bigram_disas", "bigram_nondisas", "key_disas", "key_nondisas", "loc_disas", "loc_nondisas", ] ] test_feature_vectors.head(5) # # Model Building & Training # split train data Y = train_df["target"] X_train, X_test, y_train, y_test = train_test_split( train_feature_vectors, Y, test_size=0.33, random_state=42 ) # 
Naive Bayes Classifier nb_clf = GaussianNB() # train classifier on train data after splitting nb_clf.fit(X_train, y_train) print(nb_clf.get_params()) print("split training score", nb_clf.score(X_test, y_test)) # train over all trained data applying cross validation print( "NB cross validation scores", cross_validate(nb_clf, train_feature_vectors, Y, cv=5) ) print(classification_report(Y, nb_clf.predict(train_feature_vectors))) # Support Vector Machine svm_clf = SVC(probability=True) # train classifier on train data after splitting svm_clf.fit(X_train, y_train) print(svm_clf.get_params()) print("split training score", svm_clf.score(X_test, y_test)) # train over all trained data applying cross validation print( "SVM cross validation scores", cross_validate(svm_clf, train_feature_vectors, Y, cv=5), ) print(classification_report(Y, svm_clf.predict(train_feature_vectors))) # K nearest neighbor knn_clf = KNeighborsClassifier() # train classifier on train data after splitting knn_clf.fit(X_train, y_train) print(knn_clf.get_params()) print("split training score", knn_clf.score(X_test, y_test)) # train over all trained data applying cross validation print( "KNN cross validation scores", cross_validate(knn_clf, train_feature_vectors, Y, cv=5), ) print(classification_report(Y, knn_clf.predict(train_feature_vectors))) # Logistic Regression logReg_clf = LogisticRegression() # train classifier on train data after splitting logReg_clf.fit(X_train, y_train) print(logReg_clf.get_params()) print("split training score", logReg_clf.score(X_test, y_test)) # train over all trained data applying cross validation print( "LR cross validation score", cross_validate(logReg_clf, train_feature_vectors, Y, cv=5), ) print(classification_report(Y, logReg_clf.predict(train_feature_vectors))) # # Model Testing # Naive Bayes Model on test features nb_clf.predict(test_feature_vectors) # SVM Model on test features svm_clf.predict(test_feature_vectors) # KNN Model on test features knn_clf.predict(test_feature_vectors) # Logistic regression model on test features logReg_clf.predict(test_feature_vectors) # # Voting Classifier on NB,SVM,LR,and KNN models from sklearn.ensemble import VotingClassifier voting_clf = VotingClassifier( estimators=[ ("NB", nb_clf), ("SVM", svm_clf), ("KNN", knn_clf), ("LogReg", logReg_clf), ], voting="soft", ) # train over all trained data applying cross validation voting_clf.fit(train_feature_vectors, Y) print("Voting score", voting_clf.score(train_feature_vectors, Y)) print(classification_report(Y, voting_clf.predict(train_feature_vectors))) # testing model on test feature vectors vals = voting_clf.predict(test_feature_vectors) # save in submission dataframe submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv") submission["id"] = test_df["id"] submission["target"] = vals submission.head(10) submission.to_csv("sample_submission.csv", index=False)
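# # How the soft vote combines the four models
# `voting="soft"` above averages the predicted class probabilities of the four
# estimators and picks the class with the highest mean probability. The snippet below
# is only a sketch of that idea, added for illustration; it assumes the already-fitted
# `nb_clf`, `svm_clf`, `knn_clf`, `logReg_clf` and the `test_feature_vectors` frame
# built earlier. Note that `VotingClassifier` fits its own clones on the full training
# data, so this manual average (based on the models fitted on the 0.67 split) can
# differ slightly from `voting_clf.predict`.
import numpy as np

probas = np.stack(
    [clf.predict_proba(test_feature_vectors) for clf in (nb_clf, svm_clf, knn_clf, logReg_clf)]
)  # shape: (n_models, n_samples, n_classes)
mean_probas = probas.mean(axis=0)  # equal-weight average over the models
manual_vote = mean_probas.argmax(axis=1)  # class with the highest mean probability
print(manual_vote[:10])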
# # Titanic-Machine Learning from Disaster - Ishan Saksena
# ### Introduction
# The sinking of the Titanic is one of the most infamous shipwrecks in history.
# On April 15, 1912, during her maiden voyage, the widely considered “unsinkable” RMS Titanic sank after colliding with an iceberg. Unfortunately, there weren’t enough lifeboats for everyone onboard, resulting in the death of 1502 out of 2224 passengers and crew.
# Aim: Predicting the survival status of the passengers in test.csv

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pdb  # Python debugger
from IPython.display import Image, display
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import RFECV
from sklearn.metrics import accuracy_score
from sklearn.tree import export_graphviz
from sklearn.impute import KNNImputer
from xgboost import XGBRegressor
from xgboost import XGBClassifier
from sklearn.model_selection import (
    cross_val_score,
    StratifiedKFold,
    learning_curve,
    train_test_split,
    GridSearchCV,
)

sns.set()
pd.set_option("display.max_colwidth", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)

df_train = pd.read_csv("../input/titanic/train.csv")
df_test = pd.read_csv("../input/titanic/test.csv")
df_data = df_train.append(df_test).reset_index(drop=True)
df_submission = pd.read_csv("../input/titanic/gender_submission.csv")
df_train.head()

print("Training null values\n")
print(df_train.isnull().sum())
print("-" * 30)
print("Testing null values\n")
print(df_test.isnull().sum())

print("Training info\n")
print(df_train.info())
print("-" * 30)
print("Testing info\n")
print(df_test.info())

df_data.describe()

# ## Some Observations from the Data
# ### Features
# PassengerId
# Survived
# Pclass (Ticket class)
# Name
# Sex
# Age
# SibSp (# of siblings / spouses aboard the Titanic)
# Parch (# of parents / children aboard the Titanic)
# Ticket
# Fare
# Cabin
# Embarked (Port of Embarkation)
#
# ### Missing Values
# Age and Cabin have a number of missing values, Embarked has some
# ### Type
# Categorical features: Survived, Sex, Embarked, and Pclass (ordinal).
# Numerical features: Age, Fare. Discrete: SibSp, Parch.
#
# ### Distribution
# Only Name has 100% unique values
# ## Assumptions based on data analysis
# ### Correlating.
# - Will use heatmaps and other visual methods to figure out which data correlates to survival the most.
# ### Completing.
# - Fill Age value - A vital component
# - Embarked value
# - Cabin value - Has a lot of missing values and might not have high correlation. Potential Drop.
# ### Correcting.
# - Name and ID have no correlation with survival and can be dropped for the model.
# - The duplicates in the Ticket value are an interesting discovery. We can use this piece of information later on with a simple assumption - Passengers with the same ticket values may be acquainted with each other.
# - Cabin can be dropped since we cannot assume that people with a missing cabin entry were any different.
# ### Creating.
# - We could build a feature known as Family - Using the columns SibSp and Parch.
# - We could extract the title from Name
# - Turning the Age and Fare features into ordinal categorical features with Age Bands and Fare Bins
# ### Classifying.
# - Some Assumptions:
# - Women (Sex=female) and Children (Age<16) had higher survival rate.
# - The upper-class passengers (Pclass=1) had higher survival rate. # ## Exporatory Data Analysis (EDA) display(df_data[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean()) display(df_data[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean()) display(df_data[["Pclass", "Survived"]].groupby(["Pclass"], as_index=False).mean()) display(df_data[["Parch", "Survived"]].groupby(["Parch"], as_index=False).mean()) display(df_data[["SibSp", "Survived"]].groupby(["SibSp"], as_index=False).mean()) display(df_data[["Embarked", "Survived"]].groupby(["Embarked"], as_index=False).mean()) sns.countplot(x=df_data["Embarked"], hue=df_data["Survived"]) sns.countplot(x=df_data["Sex"], hue=df_data["Survived"]) # ## Feature Engineering and Data Cleaning # mapping the sex feature df_data["Sex#"] = df_data["Sex"].map({"male": 0, "female": 1}) # Create new feature for Family df_data["Fsize"] = ( df_data["Parch"] + df_data["SibSp"] + 1 ) # Family = Self + Sib + Parents/Children # Create new feature for IsAlone df_data["IsAlone"] = 0 df_data.loc[df_data.Fsize == 1, "IsAlone"] = 1 # Plot sns.countplot(x=df_data["IsAlone"], hue=df_data["Survived"]) plt.show() # This looks like important data # Making bins df_data["Fare"].fillna(80.0, inplace=True) df_data.isna().sum() df_data["FareBin"] = pd.qcut(df_data["Fare"], 6) # Mapping the bins label_encoder = LabelEncoder() df_data["FareBin"] = label_encoder.fit_transform(df_data["FareBin"]) # splits again beacuse we just engineered new feature df_train = df_data[: len(df_train)] df_test = df_data[len(df_train) :] # Training set and labels x_train = df_train.drop(labels=["Survived", "PassengerId"], axis=1) y_train = df_train["Survived"] # show columns x_train.columns # Extracting Family name by Regex - might help later df_data["Fname"] = df_data["Name"].str.extract("([A-Za-z]+.[A-Za-z]+)\,", expand=True) # Assuming the cause of duplicate tickets is because the passengers knew each other. 
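# A compact way to pull out the passengers that share a ticket is pandas' groupby/filter;
# this is only a sketch of the same idea added for illustration (not part of the original
# notebook), while the explicit loop just below builds the equivalent `duplicates` frame
# step by step from `df_data`.
shared_tickets = df_data.groupby("Ticket").filter(lambda grp: len(grp) > 1)[
    ["Name", "Ticket", "Fare", "FareBin", "Fsize", "Survived"]
]
shared_tickets.head(20)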
duplicates = [] for uniq in df_data["Ticket"].unique(): temp = df_data.loc[df_data["Ticket"] == uniq, "Name"] if temp.count() > 1: duplicates.append( df_data.loc[ df_data["Ticket"] == uniq, ["Name", "Ticket", "Fare", "FareBin", "Fsize", "Survived"], ] ) duplicates = pd.concat(duplicates) duplicates.head(20) df_friend = duplicates.loc[(duplicates.Fsize == 1) & (duplicates.Survived.notnull())] df_family = duplicates.loc[(duplicates.Fsize > 1) & (duplicates.Survived.notnull())] display(df_friend.head(), df_family.head()) print("The Duplicates: ", duplicates["Name"].count()) print("Family: ", df_family["Name"].count()) print("Friend: ", df_friend["Name"].count()) print( "Other: ", duplicates["Name"].count() - df_family["Name"].count() - df_friend["Name"].count(), ) ## Making a column for just Connected Survival df_data["Connected_Survival"] = 0.5 for ticket_num, df_grp in df_data.groupby("Ticket"): if len(df_grp) > 1: # Duplicates in Ticket for index, row in df_grp.iterrows(): smax = df_grp.drop(index).Survived.max() smin = df_grp.drop(index).Survived.min() pid = row.PassengerId if smax == 1.0: df_data.loc[df_data["PassengerId"] == pid, "Connected_Survival"] = 1 elif smin == 0.0: df_data.loc[df_data["PassengerId"] == pid, "Connected_Survival"] = 0 # Embarked Filling by checking Fare df_data[df_data["Embarked"].isnull()][["Embarked", "Pclass", "Fare"]] # Check their relation in groups df_data.groupby(["Embarked", "Pclass"])[["Fare"]].median() # 80 is closest to C1 - Assigning C and mapping # Filling missing values with the value that has greatest frequency df_data["Embarked"] = df_data["Embarked"].fillna("C") # Mapping df_data["Embarked#"] = df_data["Embarked"].map({"S": 1, "C": 2, "Q": 3}) df_data.head() ## Extracting Titles from Name - might help in filling Age df_data["Title"] = df_data["Name"].str.extract("([A-Za-z]+)\.", expand=False) df_data["Title"] = df_data["Title"].replace( [ "Capt", "Col", "Rev", "Don", "Countess", "Jonkheer", "Dona", "Sir", "Dr", "Major", "Dr", ], "Rare", ) df_data["Title"] = df_data["Title"].replace(["Mlle", "Mme", "Ms"], "Miss") df_data["Title"] = df_data["Title"].replace(["Lady"], "Mrs") df_data["Title"] = df_data["Title"].map( {"Mr": 0, "Rare": 1, "Master": 2, "Miss": 3, "Mrs": 4} ) df_data.head() # #### Filling Values of Age # 1. Linear Regression/XGBRegressor # 2. Using Title # 3. 
Using Pclass and Sex display(df_data.Age.describe()) display(df_train["Age"].isna().sum()) display(df_test["Age"].isna().sum()) # By Title - "Mr":0, "Rare" : 1, "Master" : 2,"Miss" : 3, "Mrs" : 4 df_data.groupby("Title")["Age"].median().values title_age = df_data.groupby("Title")["Age"].median().values df_data["Age_pred1"] = df_data["Age"] for i in range(5): df_data.loc[ (df_data["Title"] == i) & (df_data["Age"].isnull()), "Age_pred1" ] = title_age[i] # By Linear Regression x_train = df_data[df_data.Age.notnull()] y_train = df_data[df_data.Age.notnull()]["Age"] x_test = df_data[df_data.Age.isnull()] # select_feature = ['Sex#', 'Pclass', 'Title', 'FareBin','Embarked#', 'IsAlone'] select_feature = ["Sex#", "Pclass", "Title", "FareBin"] reg = LinearRegression() reg.fit(x_train[select_feature], y_train) reg.score(x_train[select_feature], y_train) df_data["Age_pred3"] = df_data["Age"] df_data.loc[df_data["Age"].isnull(), "Age_pred2"] = reg.predict( x_test[select_feature] ).astype("int") xgb = XGBRegressor() xgb.fit(x_train[select_feature], y_train) xgb.score(x_train[select_feature], y_train) df_data["Age_pred3"] = df_data["Age"] df_data.loc[df_data["Age"].isnull(), "Age_pred3"] = xgb.predict( x_test[select_feature] ).astype("int") # Higher survival rate for age <16, we put filter for predicting minors df_data["Minor_pred1"] = ((df_data["Age_pred1"]) < 16) * 1 df_data["Minor_pred2"] = ((df_data["Age_pred2"]) < 16) * 1 df_data["Minor_pred3"] = ((df_data["Age_pred3"]) < 16) * 1 # Bucketing Age like Fare df_data["AgeBin_pred1"] = pd.qcut(df_data["Age_pred1"], 5) # df_data['AgeBin_pred2'] = pd.qcut(df_data['Age_pred2'], 5) df_data["AgeBin_pred3"] = pd.qcut(df_data["Age_pred3"], 5) df_data["AgeBin_pred1"] = label_encoder.fit_transform(df_data["AgeBin_pred1"]) # df_data['AgeBin_pred2'] = label_encoder.fit_transform(df_data['AgeBin_pred2']) df_data["AgeBin_pred3"] = label_encoder.fit_transform(df_data["AgeBin_pred3"]) ## Assumption - What if a missing cabin value means that the passenger was not assigned a premium cabin? 
df_data["Cabin"] = df_data["Cabin"].fillna(0) def cabin(x): try: if x != 0: return 1 else: return 0 except: return 0 df_data["Cabin"] = df_data["Cabin"].apply(cabin) # ## Building Models df_data[["PassengerId", "Pclass", "Sex#"]] = df_data[ ["PassengerId", "Pclass", "Sex#"] ].astype("int32") df_data.head() df_train = df_data[: len(df_train)] df_test = df_data[len(df_train) :] train_features = [ "Survived", "Pclass", "SibSp", "Parch", "Fare", "Sex#", "Fsize", "IsAlone", "FareBin", "Connected_Survival", "Embarked#", "Age_pred1", "Age_pred3", "Minor_pred1", "Minor_pred3", "AgeBin_pred1", "AgeBin_pred3", "Cabin", ] corr_mat = df_train[train_features].astype(float).corr() corr_mat_fil = corr_mat.loc[:, "Survived"].sort_values(ascending=False) corr_mat_fil = pd.DataFrame(data=corr_mat_fil[1:]) plt.figure(figsize=(20, 12)) bar = sns.barplot( x=corr_mat_fil.Survived.abs(), y=corr_mat_fil.index, data=corr_mat_fil, palette="deep", ) train_features = [ "Survived", "Sex#", "Connected_Survival", "FareBin", "Minor_pred3", "Embarked#", "AgeBin_pred3", "Parch", "Age_pred3", "IsAlone", "Pclass", "Cabin", ] corr_mat = df_train[train_features].astype(float).corr() plt.figure(figsize=(20, 10)) sns.heatmap(corr_mat.abs(), annot=True) plt.show() # ## Random Forest # selected_features = ['Sex#', 'Pclass', 'FareBin', 'Connected_Survival', 'Minor_pred3', 'Embarked#', 'Cabin'] # selected_features = ['Sex#', 'Pclass', 'FareBin', 'Connected_Survival', 'Cabin'] selected_features = [ "Sex#", "Pclass", "FareBin", "Connected_Survival", "Minor_pred3", "Embarked#", "IsAlone", ] df_train = df_data[: len(df_train)] df_test = df_data[len(df_train) :] x_train = df_train[selected_features] y_train = df_train["Survived"] x_test = df_test[selected_features] model = RandomForestClassifier(random_state=2) grid_parameters = { "n_estimators": [i for i in range(300, 601, 50)], "min_samples_split": [10, 20, 30, 40], } grid = GridSearchCV(estimator=model, param_grid=grid_parameters) grid_result = grid.fit(x_train, y_train) # summarize results print("Best: {} using {}".format(grid_result.best_score_, grid_result.best_params_)) n_estimator = grid_result.best_params_["n_estimators"] min_samples_split = grid_result.best_params_["min_samples_split"] RFC = RandomForestClassifier(random_state=2, n_estimators=300, min_samples_split=40) RFC.fit(x_train, y_train) y_pred = RFC.predict(x_test) output = pd.DataFrame({"PassengerId": df_test["PassengerId"], "Survived": y_pred}) output = output.astype("int") # output.to_csv('predictionnocwisalone1.csv', index=False) # print('Your file was successfully saved!') # ## XGBClassifier selected_features = [ "Sex#", "Pclass", "FareBin", "Connected_Survival", "Minor_pred3", "Embarked#", "Cabin", ] df_train = df_data[: len(df_train)] df_test = df_data[len(df_train) :] x_train = df_train[selected_features] y_train = df_train["Survived"] x_test = df_test[selected_features] xgbc = XGBClassifier(random_state=2) xgbc.fit(x_train, y_train) y_pred = xgbc.predict(x_test) output = pd.DataFrame({"PassengerId": df_test["PassengerId"], "Survived": y_pred}) output = output.astype("int") ##print('Your file was successfully saved!')
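# # Quick cross-validated comparison of the two models
# The final Random Forest and XGBClassifier fits above are never scored on held-out
# data, so a stratified cross-validation on the last set of selected features gives a
# rough comparison. This is only an illustrative sketch (not part of the original
# notebook); it reuses `df_train`, `selected_features`, `n_estimator` and
# `min_samples_split` from the cells above.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score
from xgboost import XGBClassifier

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=2)
candidates = {
    "RandomForest": RandomForestClassifier(
        random_state=2, n_estimators=n_estimator, min_samples_split=min_samples_split
    ),
    "XGBClassifier": XGBClassifier(random_state=2),
}
for name, clf in candidates.items():
    scores = cross_val_score(
        clf, df_train[selected_features], df_train["Survived"].astype(int), cv=cv
    )
    print(f"{name}: mean accuracy {scores.mean():.3f} (std {scores.std():.3f})")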
<jupyter_start><jupyter_text>clrp roberta base ### Context This model is pre-trained on CommonLit readability dataset on MaskedLanguageModeling task. ### Content This can be used as a normal Roberta-base model. Kaggle dataset identifier: clrp-roberta-base <jupyter_script># Source: https://www.kaggle.com/andretugan/pre-trained-roberta-solution-in-pytorch # Above kernel takes a pre-trained base from: https://www.kaggle.com/maunish/clrp-roberta-base. The pre-training in less than a dozen loc is done here: https://www.kaggle.com/maunish/clrp-pytorch-roberta-pretrain and improves the score by about 0.07. # All upvotes to be re-directed to above kernels import os import math import random import time import numpy as np import pandas as pd import torch import torch.nn as nn from torch.utils.data import Dataset from torch.utils.data import DataLoader from transformers import AdamW from transformers import AutoTokenizer from transformers import AutoModel from transformers import AutoConfig from transformers import get_cosine_schedule_with_warmup from sklearn.model_selection import KFold import gc ##Enable automatic garbage collection gc.enable() NUM_FOLDS = 5 NUM_EPOCHS = 3 BATCH_SIZE = 16 MAX_LEN = 248 EVAL_SCHEDULE = [(0.50, 16), (0.49, 8), (0.48, 4), (0.47, 2), (-1.0, 1)] ROBERTA_PATH = "../input/clrp-roberta-base/clrp_roberta_base" TOKENIZER_PATH = "../input/clrp-roberta-base/clrp_roberta_base" DEVICE = "cuda" if torch.cuda.is_available() else "cpu" def set_random_seed(random_seed): random.seed(random_seed) np.random.seed(random_seed) os.environ["PYTHONHASHSEED"] = str(random_seed) torch.manual_seed(random_seed) torch.cuda.manual_seed(random_seed) torch.cuda.manual_seed_all(random_seed) torch.backends.cudnn.deterministic = True train_df = pd.read_csv("/kaggle/input/commonlitreadabilityprize/train.csv") train_df.drop( train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index, inplace=True, ) train_df.reset_index(drop=True, inplace=True) test_df = pd.read_csv("/kaggle/input/commonlitreadabilityprize/test.csv") submission_df = pd.read_csv( "/kaggle/input/commonlitreadabilityprize/sample_submission.csv" ) tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH) class LitDataset(Dataset): def __init__(self, df, inference_only=False): super().__init__() self.df = df self.inference_only = inference_only excerpts = df.excerpt.tolist() if not self.inference_only: self.target = torch.tensor(df.target.values, dtype=torch.float32) ##self.encoded = tokenizer.batch_encode_plus(self.text,padding = 'max_length',max_length = MAX_LEN,truncation = True,return_attention_mask=True) self.encoded, self.start_pos = [], [] for excerpt in excerpts: sentences = tokenize.sent_tokenize(excerpt) split_tok = tokenizer.batch_encode_plus(sentences) input_ids = list(np.concatenate(split_tok["input_ids"])) attention_mask = list(np.concatenate(split_tok["attention_mask"])) if len(split_tok["input_ids"][0]) > MAX_LEN: input_ids, attention_mask = ( input_ids[: MAX_LEN - 1], attention_mask[: MAX_LEN - 1], ) input_ids.extend([2]) attention_mask.extend([1]) split_tok["input_ids"], split_tok["attention_mask"] = [input_ids], [ attention_mask ] else: pad_cnt = MAX_LEN - len(split_tok["input_ids"][0]) input_ids.extend([1] * pad_cnt) attention_mask.extend([0] * pad_cnt) split_tok["input_ids"], split_tok["attention_mask"] = [input_ids], [ attention_mask ] self.encoded.append(split_tok) self.start_pos.append( np.nonzero(np.array(split_tok["input_ids"][0]) == 0)[0] ) def __len__(self): return len(self.df) def __getitem__(self, 
index): input_ids = torch.tensor(self.encoded["input_ids"][index]) attention_mask = torch.tensor(self.encoded["attention_mask"][index]) if self.inference_only: return (input_ids, attention_mask, self.start_pos[index]) else: target = self.target[index] return (input_ids, attention_mask, self.start_pos[index], target) class LitModel(nn.Module): def __init__(self): super().__init__() config = AutoConfig.from_pretrained(ROBERTA_PATH) config.update( { "output_hidden_states": True, "hidden_dropout_prob": 0.0, "layer_norm_eps": 1e-7, } ) self.roberta = AutoModel.from_pretrained(ROBERTA_PATH, config=config) self.layer_norm = nn.LayerNorm(768) self.regressor = nn.Sequential(nn.Linear(768, 1)) def forward(self, input_ids, attention_mask, start_pos): ##<bs, num_words> mean_emb_weighted = torch.empty(BATCH_SIZE, 768).to(DEVICE) roberta_output = self.roberta( input_ids=input_ids, attention_mask=attention_mask ) ##Here we will take the avg of all embeddings to represent the sentence last_layer = roberta_output[0] ##<bs, num_words, dim> for row in last_layer: ##Begin our dod. <num_words, dim> avg_embeddings = torch.empty(768).to(DEVICE) for cnt, pos in enumerate(start_pos): if cnt != len(start_pos) - 1: emb = row[pos : start_pos[cnt + 1], :] avg_embeddings += torch.mean(emb, 0) else: mask = torch.tensor( split_tok["attention_mask"][0][pos : len(split_tok["input_ids"][0])] ).unsqueeze(-1) nonmask = np.count_nonzero(mask.squeeze()) emb = row[pos : len(split_tok["input_ids"][0]), :] avg_embeddings += torch.sum(emb * mask, 0) / nonmask torch.cat(mean_emb_weighted, avg_embeddings / len(start_pos)) """mask = attention_mask.unsqueeze(-1).expand(last_layer.size()).float() sum_emb = torch.sum(last_layer * mask, 1) ##num of 1's is the num of tokens. Rest are pads sum_mask = mask.sum(1) ##take care of case where there is no padding sum_mask = torch.clamp(sum_mask, min=1e-9) mean_emb = sum_emb / sum_mask""" norm_mean_emb = self.layer_norm(mean_emb_weighted) out = self.regressor(norm_mean_emb) return out def eval_mse(model, data_loader): model.eval() mse_sum = 0 with torch.no_grad(): for batch_num, (input_ids, attention_mask, start_pos, target) in enumerate( data_loader ): input_ids = input_ids.to(DEVICE) attention_mask = attention_mask.to(DEVICE) start_pos = start_pos.to(DEVICE) target = target.to(DEVICE) pred = model(input_ids, attention_mask, start_pos) mse_sum += nn.MSELoss(reduction="sum")(pred.flatten(), target).item() return mse_sum / len(data_loader.dataset) def predict(model, data_loader): model.eval() result = np.zeros(len(data_loader.dataset)) index = 0 with torch.no_grad(): for batch_num, (input_ids, attention_mask, start_pos) in enumerate(data_loader): input_ids = input_ids.to(DEVICE) attention_mask = attention_mask.to(DEVICE) start_pos = start_pos.to(DEVICE) pred = model(input_ids, attention_mask, start_pos) result[index : index + pred.shape[0]] = pred.flatten().to("cpu") index += pred.shape[0] return result def train( model, model_path, train_loader, val_loader, optimizer, scheduler=None, num_epochs=NUM_EPOCHS, ): best_val_rmse = None best_epoch = 0 step = 0 last_eval_step = 0 eval_period = EVAL_SCHEDULE[0][1] start = time.time() for epoch in range(num_epochs): val_rmse = None for batch_num, (input_ids, attention_mask, start_pos, target) in enumerate( train_loader ): input_ids = input_ids.to(DEVICE) attention_mask = attention_mask.to(DEVICE) start_pos = start_pos.to(DEVICE) target = target.to(DEVICE) optimizer.zero_grad() model.train() pred = model(input_ids, attention_mask, start_pos) mse = 
nn.MSELoss(reduction="mean")(pred.flatten(), target) mse.backward() optimizer.step() if scheduler: scheduler.step() if step >= last_eval_step + eval_period: elapsed_seconds = time.time() - start num_steps = step - last_eval_step ##print(f"\n{num_steps} steps took {elapsed_seconds:0.3} seconds") last_eval_step = step val_rmse = math.sqrt(eval_mse(model, val_loader)) print( f"Epoch: {epoch} batch_num: {batch_num}", f"val_rmse: {val_rmse:0.4}", ) for rmse, period in EVAL_SCHEDULE: if val_rmse >= rmse: eval_period = period break if not best_val_rmse or val_rmse < best_val_rmse: best_val_rmse = val_rmse best_epoch = epoch torch.save(model.state_dict(), model_path) print(f"New best_val_rmse: {best_val_rmse:0.4}") else: print( f"Still best_val_rmse: {best_val_rmse:0.4}", f"(from epoch {best_epoch})", ) start = time.time() step += 1 return best_val_rmse def create_optimizer(model): named_parameters = list(model.named_parameters()) roberta_parameters = named_parameters[:197] attention_parameters = named_parameters[199:203] regressor_parameters = named_parameters[203:] attention_group = [params for (name, params) in attention_parameters] regressor_group = [params for (name, params) in regressor_parameters] parameters = [] parameters.append({"params": attention_group}) parameters.append({"params": regressor_group}) for layer_num, (name, params) in enumerate(roberta_parameters): weight_decay = 0.0 if "bias" in name else 0.01 lr = 2e-5 if layer_num >= 69: lr = 5e-5 if layer_num >= 133: lr = 1e-4 parameters.append({"params": params, "weight_decay": weight_decay, "lr": lr}) return AdamW(parameters) gc.collect() SEED = 1000 list_val_rmse = [] kfold = KFold(n_splits=NUM_FOLDS, random_state=SEED, shuffle=True) for fold, (train_indices, val_indices) in enumerate(kfold.split(train_df)): print(f"\nFold {fold + 1}/{NUM_FOLDS}") model_path = f"model_{fold + 1}.pth" set_random_seed(SEED + fold) train_dataset = LitDataset(train_df.loc[train_indices]) val_dataset = LitDataset(train_df.loc[val_indices]) train_loader = DataLoader( train_dataset, batch_size=BATCH_SIZE, drop_last=True, shuffle=True, num_workers=2, ) val_loader = DataLoader( val_dataset, batch_size=BATCH_SIZE, drop_last=False, shuffle=False, num_workers=2, ) set_random_seed(SEED + fold) model = LitModel().to(DEVICE) optimizer = create_optimizer(model) scheduler = get_cosine_schedule_with_warmup( optimizer, num_training_steps=NUM_EPOCHS * len(train_loader), num_warmup_steps=50, ) list_val_rmse.append( train( model, model_path, train_loader, val_loader, optimizer, scheduler=scheduler ) ) del model gc.collect() print("\nPerformance estimates:") print(list_val_rmse) print("Mean:", np.array(list_val_rmse).mean()) test_dataset = LitDataset(test_df, inference_only=True) all_predictions = np.zeros((len(list_val_rmse), len(test_df))) test_dataset = LitDataset(test_df, inference_only=True) test_loader = DataLoader( test_dataset, batch_size=BATCH_SIZE, drop_last=False, shuffle=False, num_workers=2 ) for index in range(len(list_val_rmse)): model_path = f"model_{index + 1}.pth" print(f"\nUsing {model_path}") model = LitModel() model.load_state_dict(torch.load(model_path)) model.to(DEVICE) all_predictions[index] = predict(model, test_loader) del model gc.collect() predictions = all_predictions.mean(axis=0) submission_df.target = predictions print(submission_df) submission_df.to_csv("submission.csv", index=False)
# Source: https://www.kaggle.com/andretugan/pre-trained-roberta-solution-in-pytorch # Above kernel takes a pre-trained base from: https://www.kaggle.com/maunish/clrp-roberta-base. The pre-training in less than a dozen loc is done here: https://www.kaggle.com/maunish/clrp-pytorch-roberta-pretrain and improves the score by about 0.07. # All upvotes to be re-directed to above kernels import os import math import random import time import numpy as np import pandas as pd import torch import torch.nn as nn from torch.utils.data import Dataset from torch.utils.data import DataLoader from transformers import AdamW from transformers import AutoTokenizer from transformers import AutoModel from transformers import AutoConfig from transformers import get_cosine_schedule_with_warmup from sklearn.model_selection import KFold import gc ##Enable automatic garbage collection gc.enable() NUM_FOLDS = 5 NUM_EPOCHS = 3 BATCH_SIZE = 16 MAX_LEN = 248 EVAL_SCHEDULE = [(0.50, 16), (0.49, 8), (0.48, 4), (0.47, 2), (-1.0, 1)] ROBERTA_PATH = "../input/clrp-roberta-base/clrp_roberta_base" TOKENIZER_PATH = "../input/clrp-roberta-base/clrp_roberta_base" DEVICE = "cuda" if torch.cuda.is_available() else "cpu" def set_random_seed(random_seed): random.seed(random_seed) np.random.seed(random_seed) os.environ["PYTHONHASHSEED"] = str(random_seed) torch.manual_seed(random_seed) torch.cuda.manual_seed(random_seed) torch.cuda.manual_seed_all(random_seed) torch.backends.cudnn.deterministic = True train_df = pd.read_csv("/kaggle/input/commonlitreadabilityprize/train.csv") train_df.drop( train_df[(train_df.target == 0) & (train_df.standard_error == 0)].index, inplace=True, ) train_df.reset_index(drop=True, inplace=True) test_df = pd.read_csv("/kaggle/input/commonlitreadabilityprize/test.csv") submission_df = pd.read_csv( "/kaggle/input/commonlitreadabilityprize/sample_submission.csv" ) tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH) class LitDataset(Dataset): def __init__(self, df, inference_only=False): super().__init__() self.df = df self.inference_only = inference_only excerpts = df.excerpt.tolist() if not self.inference_only: self.target = torch.tensor(df.target.values, dtype=torch.float32) ##self.encoded = tokenizer.batch_encode_plus(self.text,padding = 'max_length',max_length = MAX_LEN,truncation = True,return_attention_mask=True) self.encoded, self.start_pos = [], [] for excerpt in excerpts: sentences = tokenize.sent_tokenize(excerpt) split_tok = tokenizer.batch_encode_plus(sentences) input_ids = list(np.concatenate(split_tok["input_ids"])) attention_mask = list(np.concatenate(split_tok["attention_mask"])) if len(split_tok["input_ids"][0]) > MAX_LEN: input_ids, attention_mask = ( input_ids[: MAX_LEN - 1], attention_mask[: MAX_LEN - 1], ) input_ids.extend([2]) attention_mask.extend([1]) split_tok["input_ids"], split_tok["attention_mask"] = [input_ids], [ attention_mask ] else: pad_cnt = MAX_LEN - len(split_tok["input_ids"][0]) input_ids.extend([1] * pad_cnt) attention_mask.extend([0] * pad_cnt) split_tok["input_ids"], split_tok["attention_mask"] = [input_ids], [ attention_mask ] self.encoded.append(split_tok) self.start_pos.append( np.nonzero(np.array(split_tok["input_ids"][0]) == 0)[0] ) def __len__(self): return len(self.df) def __getitem__(self, index): input_ids = torch.tensor(self.encoded["input_ids"][index]) attention_mask = torch.tensor(self.encoded["attention_mask"][index]) if self.inference_only: return (input_ids, attention_mask, self.start_pos[index]) else: target = self.target[index] return (input_ids, 
attention_mask, self.start_pos[index], target) class LitModel(nn.Module): def __init__(self): super().__init__() config = AutoConfig.from_pretrained(ROBERTA_PATH) config.update( { "output_hidden_states": True, "hidden_dropout_prob": 0.0, "layer_norm_eps": 1e-7, } ) self.roberta = AutoModel.from_pretrained(ROBERTA_PATH, config=config) self.layer_norm = nn.LayerNorm(768) self.regressor = nn.Sequential(nn.Linear(768, 1)) def forward(self, input_ids, attention_mask, start_pos): ##<bs, num_words> mean_emb_weighted = torch.empty(BATCH_SIZE, 768).to(DEVICE) roberta_output = self.roberta( input_ids=input_ids, attention_mask=attention_mask ) ##Here we will take the avg of all embeddings to represent the sentence last_layer = roberta_output[0] ##<bs, num_words, dim> for row in last_layer: ##Begin our dod. <num_words, dim> avg_embeddings = torch.empty(768).to(DEVICE) for cnt, pos in enumerate(start_pos): if cnt != len(start_pos) - 1: emb = row[pos : start_pos[cnt + 1], :] avg_embeddings += torch.mean(emb, 0) else: mask = torch.tensor( split_tok["attention_mask"][0][pos : len(split_tok["input_ids"][0])] ).unsqueeze(-1) nonmask = np.count_nonzero(mask.squeeze()) emb = row[pos : len(split_tok["input_ids"][0]), :] avg_embeddings += torch.sum(emb * mask, 0) / nonmask torch.cat(mean_emb_weighted, avg_embeddings / len(start_pos)) """mask = attention_mask.unsqueeze(-1).expand(last_layer.size()).float() sum_emb = torch.sum(last_layer * mask, 1) ##num of 1's is the num of tokens. Rest are pads sum_mask = mask.sum(1) ##take care of case where there is no padding sum_mask = torch.clamp(sum_mask, min=1e-9) mean_emb = sum_emb / sum_mask""" norm_mean_emb = self.layer_norm(mean_emb_weighted) out = self.regressor(norm_mean_emb) return out def eval_mse(model, data_loader): model.eval() mse_sum = 0 with torch.no_grad(): for batch_num, (input_ids, attention_mask, start_pos, target) in enumerate( data_loader ): input_ids = input_ids.to(DEVICE) attention_mask = attention_mask.to(DEVICE) start_pos = start_pos.to(DEVICE) target = target.to(DEVICE) pred = model(input_ids, attention_mask, start_pos) mse_sum += nn.MSELoss(reduction="sum")(pred.flatten(), target).item() return mse_sum / len(data_loader.dataset) def predict(model, data_loader): model.eval() result = np.zeros(len(data_loader.dataset)) index = 0 with torch.no_grad(): for batch_num, (input_ids, attention_mask, start_pos) in enumerate(data_loader): input_ids = input_ids.to(DEVICE) attention_mask = attention_mask.to(DEVICE) start_pos = start_pos.to(DEVICE) pred = model(input_ids, attention_mask, start_pos) result[index : index + pred.shape[0]] = pred.flatten().to("cpu") index += pred.shape[0] return result def train( model, model_path, train_loader, val_loader, optimizer, scheduler=None, num_epochs=NUM_EPOCHS, ): best_val_rmse = None best_epoch = 0 step = 0 last_eval_step = 0 eval_period = EVAL_SCHEDULE[0][1] start = time.time() for epoch in range(num_epochs): val_rmse = None for batch_num, (input_ids, attention_mask, start_pos, target) in enumerate( train_loader ): input_ids = input_ids.to(DEVICE) attention_mask = attention_mask.to(DEVICE) start_pos = start_pos.to(DEVICE) target = target.to(DEVICE) optimizer.zero_grad() model.train() pred = model(input_ids, attention_mask, start_pos) mse = nn.MSELoss(reduction="mean")(pred.flatten(), target) mse.backward() optimizer.step() if scheduler: scheduler.step() if step >= last_eval_step + eval_period: elapsed_seconds = time.time() - start num_steps = step - last_eval_step ##print(f"\n{num_steps} steps took 
{elapsed_seconds:0.3} seconds") last_eval_step = step val_rmse = math.sqrt(eval_mse(model, val_loader)) print( f"Epoch: {epoch} batch_num: {batch_num}", f"val_rmse: {val_rmse:0.4}", ) for rmse, period in EVAL_SCHEDULE: if val_rmse >= rmse: eval_period = period break if not best_val_rmse or val_rmse < best_val_rmse: best_val_rmse = val_rmse best_epoch = epoch torch.save(model.state_dict(), model_path) print(f"New best_val_rmse: {best_val_rmse:0.4}") else: print( f"Still best_val_rmse: {best_val_rmse:0.4}", f"(from epoch {best_epoch})", ) start = time.time() step += 1 return best_val_rmse def create_optimizer(model): named_parameters = list(model.named_parameters()) roberta_parameters = named_parameters[:197] attention_parameters = named_parameters[199:203] regressor_parameters = named_parameters[203:] attention_group = [params for (name, params) in attention_parameters] regressor_group = [params for (name, params) in regressor_parameters] parameters = [] parameters.append({"params": attention_group}) parameters.append({"params": regressor_group}) for layer_num, (name, params) in enumerate(roberta_parameters): weight_decay = 0.0 if "bias" in name else 0.01 lr = 2e-5 if layer_num >= 69: lr = 5e-5 if layer_num >= 133: lr = 1e-4 parameters.append({"params": params, "weight_decay": weight_decay, "lr": lr}) return AdamW(parameters) gc.collect() SEED = 1000 list_val_rmse = [] kfold = KFold(n_splits=NUM_FOLDS, random_state=SEED, shuffle=True) for fold, (train_indices, val_indices) in enumerate(kfold.split(train_df)): print(f"\nFold {fold + 1}/{NUM_FOLDS}") model_path = f"model_{fold + 1}.pth" set_random_seed(SEED + fold) train_dataset = LitDataset(train_df.loc[train_indices]) val_dataset = LitDataset(train_df.loc[val_indices]) train_loader = DataLoader( train_dataset, batch_size=BATCH_SIZE, drop_last=True, shuffle=True, num_workers=2, ) val_loader = DataLoader( val_dataset, batch_size=BATCH_SIZE, drop_last=False, shuffle=False, num_workers=2, ) set_random_seed(SEED + fold) model = LitModel().to(DEVICE) optimizer = create_optimizer(model) scheduler = get_cosine_schedule_with_warmup( optimizer, num_training_steps=NUM_EPOCHS * len(train_loader), num_warmup_steps=50, ) list_val_rmse.append( train( model, model_path, train_loader, val_loader, optimizer, scheduler=scheduler ) ) del model gc.collect() print("\nPerformance estimates:") print(list_val_rmse) print("Mean:", np.array(list_val_rmse).mean()) test_dataset = LitDataset(test_df, inference_only=True) all_predictions = np.zeros((len(list_val_rmse), len(test_df))) test_dataset = LitDataset(test_df, inference_only=True) test_loader = DataLoader( test_dataset, batch_size=BATCH_SIZE, drop_last=False, shuffle=False, num_workers=2 ) for index in range(len(list_val_rmse)): model_path = f"model_{index + 1}.pth" print(f"\nUsing {model_path}") model = LitModel() model.load_state_dict(torch.load(model_path)) model.to(DEVICE) all_predictions[index] = predict(model, test_loader) del model gc.collect() predictions = all_predictions.mean(axis=0) submission_df.target = predictions print(submission_df) submission_df.to_csv("submission.csv", index=False)
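# Aside: the commented-out block inside LitModel.forward above describes plain
# attention-mask weighted mean pooling of the last hidden state. A minimal,
# self-contained sketch of that idea is given below; tensor names are
# illustrative placeholders, not the exact pooling this kernel trains with.
import torch


def masked_mean_pool(last_hidden_state, attention_mask):
    """Average token embeddings while ignoring padding positions.

    last_hidden_state: <batch, seq_len, hidden>
    attention_mask:    <batch, seq_len>, 1 for real tokens, 0 for padding
    """
    mask = attention_mask.unsqueeze(-1).expand(last_hidden_state.size()).float()
    summed = torch.sum(last_hidden_state * mask, dim=1)  # sum over real tokens only
    counts = torch.clamp(mask.sum(dim=1), min=1e-9)  # avoid division by zero
    return summed / counts  # <batch, hidden>


# e.g. masked_mean_pool(torch.randn(2, MAX_LEN, 768), torch.ones(2, MAX_LEN))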
false
3
3,636
0
3,707
3,636
69696364
<jupyter_start><jupyter_text>COVID and Crime
Kaggle dataset identifier: covid-and-crime
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from glob import glob

# pd.set_option('display.max_rows', 500)
# pd.set_option('display.max_columns', 500)
# pd.set_option('display.width', 1000)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input/covid-and-crime"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# # COVID and Crime Analysis
# ## Understanding the impact of COVID and Lockdown on Street Crime in 2020
#
data = sorted(glob("/kaggle/input/covid-and-crime/*metropolitan-street.csv"))
crimes = pd.concat(
    (pd.read_csv(file).assign(filename=file) for file in data), ignore_index=True
)

# crimes.head()
# There's a bunch of columns we don't need, drop these to make dataset easier to work with
crimes.drop(
    [
        "Crime ID",
        "Reported by",
        "Falls within",
        "Longitude",
        "Latitude",
        "Location",
        "LSOA code",
        "Last outcome category",
        "Context",
        "filename",
    ],
    axis=1,
    inplace=True,
)

# Rename the remaining headings to remove spaces
columns_headings = ["Year_Month", "LSOA_Name", "Crime_Type"]
crimes.columns = columns_headings

# Split Year_Month into new Month and Year columns
crimes["Month"] = crimes.Year_Month.str.slice(5)
crimes["Year"] = crimes.Year_Month.str.slice(0, 4)

# Split the LSOA_Name field using rpartition()
# This results in a dataframe ('result') comprising 3 columns;
# [0] The Region [1] A Space [2] The LSOA Code
# Use column [0] to create a new Region column
result = crimes.LSOA_Name.str.rpartition()
crimes["Region"] = result[0]

# Drop the original 'Year_Month' and 'LSOA_Name' columns as no longer needed
crimes.drop(["Year_Month", "LSOA_Name"], axis=1, inplace=True)

# Reorder the columns using '.reindex'
neworder = ["Region", "Year", "Month", "Crime_Type"]
crimes = crimes.reindex(columns=neworder)

# The simplified dataset
crimes.head()

# Count Crimes by Region then Sort Descending
# The top 33 values will (should) contain all the London Boroughs
region_summary = crimes.groupby(["Region"]).Region.count()
sorted_region_summary = region_summary.sort_values(ascending=False)
data = sorted_region_summary.head(33)
boroughs = data.index.to_list()

# Create one dataframe for Met Crimes and one for all others
met_crimes = crimes.loc[crimes.Region.isin(boroughs)]
non_met_crimes = crimes.loc[~crimes.Region.isin(boroughs)]

# Quick summary of what we have in met_crimes
met_crimes.groupby("Crime_Type").Crime_Type.count()

# Quick summary of what we have in non_met_crimes
non_met_crimes.groupby("Crime_Type").Crime_Type.count()

year = "2020"
crime = "Anti-social behaviour"

plot_data = (
    met_crimes.loc[(met_crimes.Year == year) & (met_crimes.Crime_Type == crime), :]
    .groupby(["Region", "Month"])
    .Crime_Type.count()
    .reset_index()
)
plot_data.columns = ["Region", "Month", "Count"]
df = plot_data.pivot(index="Month", columns="Region", values="Count")
df2 = plot_data.pivot(index="Region", columns="Month", values="Count")
df.head()

plt.rcParams["figure.figsize"] = (25, 20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
sns.set(font_scale=10)
sns.set_context("paper")
mychart = sns.lineplot(data=df, linewidth=1.5, dashes=False, palette="Paired")
mychart.set_title((crime + " by Month " + "(" + year + ")"), fontsize=25)
mychart.set_xlabel("Month", fontsize=25)
mychart.set_ylabel("Count", fontsize=25)
mychart.legend(loc=1, bbox_to_anchor=(1, 1), fontsize=15, shadow=True)

plt.rcParams["figure.figsize"] = (25, 20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
sns.set(font_scale=10)
sns.set_context("paper")
mychart = sns.boxplot(data=df2, palette="Paired")
mychart.set_title((crime + " by Month " + "(" + year + ")"), fontsize=25)
mychart.set_xlabel("Month", fontsize=25)
mychart.set_ylabel("Count", fontsize=25)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/696/69696364.ipynb
covid-and-crime
stevecollins
[{"Id": 69696364, "ScriptId": 19042866, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7307169, "CreationDate": "08/02/2021 20:23:00", "VersionNumber": 2.0, "Title": "COVID and Crime Analysis", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 116.0, "LinesInsertedFromPrevious": 6.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 110.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 93169673, "KernelVersionId": 69696364, "SourceDatasetVersionId": 2492920}]
[{"Id": 2492920, "DatasetId": 1509133, "DatasourceVersionId": 2535502, "CreatorUserId": 7307169, "LicenseName": "Other (specified in description)", "CreationDate": "08/02/2021 20:00:31", "VersionNumber": 1.0, "Title": "COVID and Crime", "Slug": "covid-and-crime", "Subtitle": "2020 Street Crime Data for the Metropolitan Police", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1509133, "CreatorUserId": 7307169, "OwnerUserId": 7307169.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2492920.0, "CurrentDatasourceVersionId": 2535502.0, "ForumId": 1528883, "Type": 2, "CreationDate": "08/02/2021 20:00:31", "LastActivityDate": "08/02/2021", "TotalViews": 996, "TotalDownloads": 68, "TotalVotes": 1, "TotalKernels": 1}]
[{"Id": 7307169, "UserName": "stevecollins", "DisplayName": "Steve Collins", "RegisterDate": "04/29/2021", "PerformanceTier": 1}]
false
0
1,371
0
1,397
1,371
69696269
<jupyter_start><jupyter_text>US counties COVID 19 dataset From the New York Times GITHUB source: [CSV US counties](About this file Edit https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv) "The New York Times is releasing a series of data files with cumulative counts of coronavirus cases in the United States, at the state and county level, over time. We are compiling this time series data from state and local governments and health departments in an attempt to provide a complete record of the ongoing outbreak. Since late January, The Times has tracked cases of coronavirus in real time as they were identified after testing. Because of the widespread shortage of testing, however, the data is necessarily limited in the picture it presents of the outbreak. We have used this data to power our maps and reporting tracking the outbreak, and it is now being made available to the public in response to requests from researchers, scientists and government officials who would like access to the data to better understand the outbreak. The data begins with the first reported coronavirus case in Washington State on Jan. 21, 2020. We will publish regular updates to the data in this repository. United States Data Data on cumulative coronavirus cases and deaths can be found in two files for states and counties. Each row of data reports cumulative counts based on our best reporting up to the moment we publish an update. We do our best to revise earlier entries in the data when we receive new information." The specific data here, is the data PER US COUNTY. The CSV link for counties is: https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv Kaggle dataset identifier: us-counties-covid-19-dataset <jupyter_code>import pandas as pd df = pd.read_csv('us-counties-covid-19-dataset/us-counties.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 2502832 entries, 0 to 2502831 Data columns (total 6 columns): # Column Dtype --- ------ ----- 0 date object 1 county object 2 state object 3 fips float64 4 cases int64 5 deaths float64 dtypes: float64(2), int64(1), object(3) memory usage: 114.6+ MB <jupyter_text>Examples: { "date": "2020-01-21 00:00:00", "county": "Snohomish", "state": "Washington", "fips": 53061, "cases": 1, "deaths": 0 } { "date": "2020-01-22 00:00:00", "county": "Snohomish", "state": "Washington", "fips": 53061, "cases": 1, "deaths": 0 } { "date": "2020-01-23 00:00:00", "county": "Snohomish", "state": "Washington", "fips": 53061, "cases": 1, "deaths": 0 } { "date": "2020-01-24 00:00:00", "county": "Cook", "state": "Illinois", "fips": 17031, "cases": 1, "deaths": 0 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)

district = pd.read_csv(
    "../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv",
    index_col=0,
).dropna(how="all")
district.head()
district.shape
states = (
    pd.read_csv(
        "../input/us-counties-covid-19-dataset/us-counties.csv",
        usecols=["date", "state", "cases", "deaths"],
    )
    .groupby(["state", "date"])
    .sum()
    .sort_values(by=["state", "date"], ascending=True)
)
states.head()
allstates, _ = zip(*states.index)
allstates = set(allstates)
allstates
states_dic = {state: states.loc[state] for state in allstates}
for state, df in states_dic.items():
    df[["cases", "deaths"]] -= df[["cases", "deaths"]].shift(1)
    states_dic[state] = df.dropna().rolling(7, min_periods=1).mean()
product = pd.read_csv(
    "../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv"
)
product.head()

# ### First Study : New York
ny_ids = district.loc[district.state == "New York"].index
ny_engagement = (
    pd.concat(
        [
            pd.read_csv(
                f"../input/learnplatform-covid19-impact-on-digital-learning/engagement_data/{id}.csv"
            )
            for id in ny_ids
        ],
        axis=0,
    )
    .fillna(0)
    .groupby(["time", "lp_id"])
    .mean()
    .reset_index()
    .groupby("time")
    .sum()
    .drop(columns=["lp_id"])
)
ny_engagement
ny_engagement = (
    states_dic["New York"][["deaths"]].join(ny_engagement, how="right").fillna(0)
)
ny_engagement["covid_has_started"] = 0
ny_engagement.loc[ny_engagement.deaths.cumsum() > 0, "covid_has_started"] = 1
ny_engagement
ny_engagement.corr()
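# Aside: the loop over states_dic above turns cumulative case/death counts into
# daily increments and then smooths them with a 7-day rolling mean. The same
# idea on a tiny synthetic series (numbers are made up):
import pandas as pd

cumulative = pd.Series([0, 1, 3, 6, 10, 15, 21, 28], name="cases")
daily = cumulative.diff().dropna()  # equivalent to x - x.shift(1)
smoothed = daily.rolling(7, min_periods=1).mean()
print(daily.tolist())  # [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
print(smoothed.tolist())  # [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0]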
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/696/69696269.ipynb
us-counties-covid-19-dataset
fireballbyedimyrnmom
[{"Id": 69696269, "ScriptId": 19042509, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3612572, "CreationDate": "08/02/2021 20:22:27", "VersionNumber": 1.0, "Title": "[Learn Platform] Starter Notebook", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 73.0, "LinesInsertedFromPrevious": 73.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 93169587, "KernelVersionId": 69696269, "SourceDatasetVersionId": 2466445}]
[{"Id": 2466445, "DatasetId": 575374, "DatasourceVersionId": 2508903, "CreatorUserId": 4253886, "LicenseName": "Other (specified in description)", "CreationDate": "07/26/2021 22:16:13", "VersionNumber": 280.0, "Title": "US counties COVID 19 dataset", "Slug": "us-counties-covid-19-dataset", "Subtitle": "NYT's github CSV on COVID19 per US counties", "Description": "From the New York Times GITHUB source:\n[CSV US counties](About this file Edit https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv)\n\"The New York Times is releasing a series of data files with cumulative counts of coronavirus cases in the United States, at the state and county level, over time. We are compiling this time series data from state and local governments and health departments in an attempt to provide a complete record of the ongoing outbreak.\n\nSince late January, The Times has tracked cases of coronavirus in real time as they were identified after testing. Because of the widespread shortage of testing, however, the data is necessarily limited in the picture it presents of the outbreak.\n\nWe have used this data to power our maps and reporting tracking the outbreak, and it is now being made available to the public in response to requests from researchers, scientists and government officials who would like access to the data to better understand the outbreak.\n\nThe data begins with the first reported coronavirus case in Washington State on Jan. 21, 2020. We will publish regular updates to the data in this repository.\nUnited States Data\n\nData on cumulative coronavirus cases and deaths can be found in two files for states and counties.\n\nEach row of data reports cumulative counts based on our best reporting up to the moment we publish an update. We do our best to revise earlier entries in the data when we receive new information.\"\n\nThe specific data here, is the data PER US COUNTY.\n\nThe CSV link for counties is: https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv", "VersionNotes": "Automatic Update 2021-07-26", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 575374, "CreatorUserId": 4253886, "OwnerUserId": 4253886.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6250084.0, "CurrentDatasourceVersionId": 6329846.0, "ForumId": 589164, "Type": 2, "CreationDate": "03/28/2020 15:04:26", "LastActivityDate": "03/28/2020", "TotalViews": 195793, "TotalDownloads": 21584, "TotalVotes": 446, "TotalKernels": 167}]
[{"Id": 4253886, "UserName": "fireballbyedimyrnmom", "DisplayName": "MyrnaMFL", "RegisterDate": "12/26/2019", "PerformanceTier": 1}]
[{"us-counties-covid-19-dataset/us-counties.csv": {"column_names": "[\"date\", \"county\", \"state\", \"fips\", \"cases\", \"deaths\"]", "column_data_types": "{\"date\": \"object\", \"county\": \"object\", \"state\": \"object\", \"fips\": \"float64\", \"cases\": \"int64\", \"deaths\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2502832 entries, 0 to 2502831\nData columns (total 6 columns):\n # Column Dtype \n--- ------ ----- \n 0 date object \n 1 county object \n 2 state object \n 3 fips float64\n 4 cases int64 \n 5 deaths float64\ndtypes: float64(2), int64(1), object(3)\nmemory usage: 114.6+ MB\n", "summary": "{\"fips\": {\"count\": 2479154.0, \"mean\": 31399.58357286397, \"std\": 16342.509037015261, \"min\": 1001.0, \"25%\": 19023.0, \"50%\": 30011.0, \"75%\": 46111.0, \"max\": 78030.0}, \"cases\": {\"count\": 2502832.0, \"mean\": 10033.804996899513, \"std\": 47525.21722359815, \"min\": 0.0, \"25%\": 382.0, \"50%\": 1773.0, \"75%\": 5884.0, \"max\": 2908425.0}, \"deaths\": {\"count\": 2445227.0, \"mean\": 161.61002270954802, \"std\": 820.3334694664128, \"min\": 0.0, \"25%\": 6.0, \"50%\": 33.0, \"75%\": 101.0, \"max\": 40267.0}}", "examples": "{\"date\":{\"0\":\"2020-01-21\",\"1\":\"2020-01-22\",\"2\":\"2020-01-23\",\"3\":\"2020-01-24\"},\"county\":{\"0\":\"Snohomish\",\"1\":\"Snohomish\",\"2\":\"Snohomish\",\"3\":\"Cook\"},\"state\":{\"0\":\"Washington\",\"1\":\"Washington\",\"2\":\"Washington\",\"3\":\"Illinois\"},\"fips\":{\"0\":53061.0,\"1\":53061.0,\"2\":53061.0,\"3\":17031.0},\"cases\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\"deaths\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0}}"}}]
true
3
<start_data_description><data_path>us-counties-covid-19-dataset/us-counties.csv: <column_names> ['date', 'county', 'state', 'fips', 'cases', 'deaths'] <column_types> {'date': 'object', 'county': 'object', 'state': 'object', 'fips': 'float64', 'cases': 'int64', 'deaths': 'float64'} <dataframe_Summary> {'fips': {'count': 2479154.0, 'mean': 31399.58357286397, 'std': 16342.509037015261, 'min': 1001.0, '25%': 19023.0, '50%': 30011.0, '75%': 46111.0, 'max': 78030.0}, 'cases': {'count': 2502832.0, 'mean': 10033.804996899513, 'std': 47525.21722359815, 'min': 0.0, '25%': 382.0, '50%': 1773.0, '75%': 5884.0, 'max': 2908425.0}, 'deaths': {'count': 2445227.0, 'mean': 161.61002270954802, 'std': 820.3334694664128, 'min': 0.0, '25%': 6.0, '50%': 33.0, '75%': 101.0, 'max': 40267.0}} <dataframe_info> RangeIndex: 2502832 entries, 0 to 2502831 Data columns (total 6 columns): # Column Dtype --- ------ ----- 0 date object 1 county object 2 state object 3 fips float64 4 cases int64 5 deaths float64 dtypes: float64(2), int64(1), object(3) memory usage: 114.6+ MB <some_examples> {'date': {'0': '2020-01-21', '1': '2020-01-22', '2': '2020-01-23', '3': '2020-01-24'}, 'county': {'0': 'Snohomish', '1': 'Snohomish', '2': 'Snohomish', '3': 'Cook'}, 'state': {'0': 'Washington', '1': 'Washington', '2': 'Washington', '3': 'Illinois'}, 'fips': {'0': 53061.0, '1': 53061.0, '2': 53061.0, '3': 17031.0}, 'cases': {'0': 1, '1': 1, '2': 1, '3': 1}, 'deaths': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}} <end_description>
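# Aside: given the schema documented above (date, county, state, fips, cases,
# deaths), a minimal way to load the NYT county file with a parsed date column
# and explicit numeric dtypes. The path matches the one used in this notebook;
# it may differ in other environments:
import pandas as pd

counties = pd.read_csv(
    "../input/us-counties-covid-19-dataset/us-counties.csv",
    parse_dates=["date"],
    dtype={"cases": "int64", "deaths": "float64", "fips": "float64"},
)
print(counties.dtypes)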
573
1
1,459
573
69696591
# Ignore how it says "no data sources" above. That means there are no *static* data sources. Here are the data sources for this notebook, which are accessed at runtime to create the graphs below: # * [JHU github repository](https://github.com/CSSEGISandData/COVID-19) # * [Texas HHS](https://dshs.texas.gov/news/updates.shtm) # * [Walker County, TX Office of Emergency Management](https://www.co.walker.tx.us/department/index.php?structureid=17) # This is a notebook for visualizing trends in COVID-19 data that I have not seen elsewhere. This notebook is meant to complement (not replace) other trackers, such as [worldometer](https://www.worldometers.info/coronavirus/). Other great trackers include [ProPublica's tracker](https://projects.propublica.org/reopening-america/), [covid19-projections](https://covid19-projections.com/about/), [CovidActNow](https://covidactnow.org/us/tx/?s=44750), and the [Texas Tribune tracker](https://apps.texastribune.org/features/2020/texas-coronavirus-cases-map/). # This notebook will be updated with new graphs approximately weekly. # County-level visualizations will remain focused on areas important to me. If you would like to see areas other than those depicted below and you know me, please ask. Otherwise, you may fork the notebook and modify it accordingly. The necessary modifications should be straightforward. # TODO: # -test incr vs cases incr, last 7 and 14 days - the computations are done, but think about the presentation import sys import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import urllib.request import datetime import plotly.graph_objs as go from plotly.offline import iplot from plotly.subplots import make_subplots # data source: https://www.co.walker.tx.us/department/index.php?structureid=17 # unfortunately I have to update this one by hand. UGH. 
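# Aside: the one-row, wide dataframe defined just below grows by one date
# column per hand entry. A small hypothetical helper for appending the next
# day's cumulative total in the same "M/D/YY" column-naming convention (the
# helper and the example value are illustrative, not part of the original
# update workflow):
def append_daily_total(df_wide, date_str, cumulative_cases):
    """Add a new date column to a one-row wide dataframe of cumulative counts."""
    df_wide[date_str] = [cumulative_cases]
    return df_wide


# e.g. append_daily_total(df_Walker_TX_free, "7/6/21", 7262)  # value is hypothetical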
df_Walker_TX_free = pd.DataFrame( { "Province_State": ["Texas"], "Admin2": ["Walker Free"], "Country_Region": ["US"], "3/22/20": [1], "3/23/20": [1], "3/24/20": [1], "3/25/20": [1], "3/26/20": [2], "3/27/20": [2], "3/28/20": [3], "3/29/20": [3], "3/30/20": [3], "3/31/20": [6], "4/1/20": [7], "4/2/20": [9], "4/3/20": [10], "4/4/20": [10], "4/5/20": [10], "4/6/20": [15], "4/7/20": [15], "4/8/20": [16], "4/9/20": [18], "4/10/20": [18], "4/11/20": [18], "4/12/20": [18], "4/13/20": [21], "4/14/20": [26], "4/15/20": [41], "4/16/20": [43], "4/17/20": [44], "4/18/20": [44], "4/19/20": [44], "4/20/20": [49], "4/21/20": [51], "4/22/20": [59], "4/23/20": [61], "4/24/20": [65], "4/25/20": [65], "4/26/20": [65], "4/27/20": [69], "4/28/20": [83], "4/29/20": [83], "4/30/20": [104], "5/1/20": [108], "5/2/20": [108], "5/3/20": [108], "5/4/20": [114], "5/5/20": [128], "5/6/20": [128], "5/7/20": [128], "5/8/20": [128], "5/9/20": [128], "5/10/20": [128], "5/11/20": [131], "5/12/20": [131], "5/13/20": [137], "5/14/20": [137], "5/15/20": [137], "5/16/20": [137], "5/17/20": [137], "5/18/20": [144], "5/19/20": [150], "5/20/20": [151], "5/21/20": [151], "5/22/20": [151], "5/23/20": [151], "5/24/20": [151], "5/25/20": [159], "5/26/20": [167], "5/27/20": [172], "5/28/20": [172], "5/29/20": [172], "5/30/20": [172], "5/31/20": [184], "6/1/20": [188], "6/2/20": [193], "6/3/20": [194], "6/4/20": [201], "6/5/20": [201], "6/6/20": [201], "6/7/20": [210], "6/8/20": [210], "6/9/20": [211], "6/10/20": [217], "6/11/20": [220], "6/12/20": [223], "6/13/20": [223], "6/14/20": [223], "6/15/20": [242], "6/16/20": [248], "6/17/20": [265], "6/18/20": [274], "6/19/20": [283], "6/20/20": [283], "6/21/20": [283], "6/22/20": [306], "6/23/20": [320], "6/24/20": [373], "6/25/20": [393], "6/26/20": [415], "6/27/20": [415], "6/28/20": [415], "6/29/20": [452], "6/30/20": [465], "7/1/20": [499], "7/2/20": [541], "7/3/20": [541], "7/4/20": [541], "7/5/20": [541], "7/6/20": [628], "7/7/20": [656], "7/8/20": [665], "7/9/20": [697], "7/10/20": [715], "7/11/20": [715], "7/12/20": [715], "7/13/20": [773], "7/14/20": [773], "7/15/20": [831], "7/16/20": [848], "7/17/20": [848], "7/18/20": [848], "7/19/20": [848], "7/20/20": [934], "7/21/20": [970], "7/22/20": [1007], "7/23/20": [1021], "7/24/20": [1030], "7/25/20": [1030], "7/26/20": [1030], "7/27/20": [1053], "7/28/20": [1076], "7/29/20": [1089], "7/30/20": [1098], "7/31/20": [1107], "8/1/20": [1107], "8/2/20": [1107], "8/3/20": [1136], "8/4/20": [1145], "8/5/20": [1157], "8/6/20": [1172], "8/7/20": [1185], "8/8/20": [1185], "8/9/20": [1185], "8/10/20": [1239], "8/11/20": [1286], "8/12/20": [1316], "8/13/20": [1347], "8/14/20": [1401], "8/15/20": [1401], "8/16/20": [1401], "8/17/20": [1422], "8/18/20": [1426], "8/19/20": [1433], "8/20/20": [1447], "8/21/20": [1489], "8/22/20": [1489], "8/23/20": [1489], "8/24/20": [1560], "8/25/20": [1582], "8/26/20": [1613], "8/27/20": [1635], "8/28/20": [1668], "8/29/20": [1668], "8/30/20": [1668], "8/31/20": [1711], "9/1/20": [1728], "9/2/20": [1738], "9/3/20": [1750], "9/4/20": [1750], "9/5/20": [1750], "9/6/20": [1750], "9/7/20": [1750], "9/8/20": [1781], "9/9/20": [1802], "9/10/20": [1825], "9/11/20": [1838], "9/12/20": [1838], "9/13/20": [1838], "9/14/20": [1966], "9/15/20": [2005], "9/16/20": [2038], "9/17/20": [2058], "9/18/20": [2086], "9/19/20": [2086], "9/20/20": [2086], "9/21/20": [2129], "9/22/20": [2165], "9/23/20": [2174], "9/24/20": [2191], "9/25/20": [2209], "9/26/20": [2209], "9/27/20": [2209], "9/28/20": [2246], "9/29/20": [2265], "9/30/20": 
[2294], "10/1/20": [2305], "10/2/20": [2312], "10/3/20": [2312], "10/4/20": [2312], "10/5/20": [2325], "10/6/20": [2370], "10/7/20": [2376], "10/8/20": [2393], "10/9/20": [2397], "10/10/20": [2397], "10/11/20": [2397], "10/12/20": [2397], "10/13/20": [2425], "10/14/20": [2451], "10/15/20": [2465], "10/16/20": [2465], "10/17/20": [2465], "10/18/20": [2465], "10/19/20": [2483], "10/20/20": [2499], "10/21/20": [2513], "10/22/20": [2524], "10/23/20": [2529], "10/24/20": [2529], "10/25/20": [2529], "10/26/20": [2552], "10/27/20": [2585], "10/28/20": [2601], "10/29/20": [2609], "10/30/20": [2609], "10/31/20": [2609], "11/1/20": [2609], "11/2/20": [2653], "11/3/20": [2665], "11/4/20": [2666], "11/5/20": [2675], "11/6/20": [2690], "11/7/20": [2690], "11/8/20": [2690], "11/9/20": [2707], "11/10/20": [2743], "11/11/20": [2743], "11/12/20": [2777], "11/13/20": [2811], "11/14/20": [2811], "11/15/20": [2811], "11/16/20": [2840], "11/17/20": [2908], "11/18/20": [2910], "11/19/20": [2956], "11/20/20": [2999], "11/21/20": [2999], "11/22/20": [2999], "11/23/20": [3033], "11/24/20": [3136], "11/25/20": [3167], "11/26/20": [3167], "11/27/20": [3167], "11/28/20": [3167], "11/29/20": [3167], "11/30/20": [3221], "12/1/20": [3332], "12/2/20": [3423], "12/3/20": [3434], "12/4/20": [3494], "12/5/20": [3494], "12/6/20": [3494], "12/7/20": [3515], "12/8/20": [3555], "12/9/20": [3565], "12/10/20": [3573], "12/11/20": [3573], "12/12/20": [3573], "12/13/20": [3573], "12/14/20": [3623], "12/15/20": [3702], "12/16/20": [3731], "12/17/20": [3762], "12/18/20": [3800], "12/19/20": [3800], "12/20/20": [3800], "12/21/20": [3862], "12/22/20": [3943], "12/23/20": [3987], "12/24/20": [3987], "12/25/20": [3987], "12/26/20": [3987], "12/27/20": [3987], "12/28/20": [4000], "12/29/20": [4115], "12/30/20": [4201], "12/31/20": [4212], "1/1/21": [4212], "1/2/21": [4212], "1/3/21": [4212], "1/4/21": [4239], "1/5/21": [4392], "1/6/21": [4440], "1/7/21": [4466], "1/8/21": [4490], "1/9/21": [4490], "1/10/21": [4490], "1/11/21": [4509], "1/12/21": [4644], "1/13/21": [4689], "1/14/21": [4709], "1/15/21": [4795], "1/16/21": [4795], "1/17/21": [4795], "1/18/21": [4795], "1/19/21": [4837], "1/20/21": [5022], "1/21/21": [5032], "1/22/21": [5100], "1/23/21": [5100], "1/24/21": [5100], "1/25/21": [5130], "1/26/21": [5212], "1/27/21": [5232], "1/28/21": [5253], "1/29/21": [5290], "1/30/21": [5290], "1/31/21": [5290], "2/1/21": [5338], "2/2/21": [5480], "2/3/21": [5528], "2/4/21": [5545], "2/5/21": [5595], "2/6/21": [5595], "2/7/21": [5595], "2/8/21": [5664], "2/9/21": [5717], "2/10/21": [5791], "2/11/21": [5818], "2/12/21": [5847], "2/13/21": [5847], "2/14/21": [5847], "2/15/21": [5878], "2/16/21": [5878], "2/17/21": [5878], "2/18/21": [5878], "2/19/21": [5878], "2/20/21": [5878], "2/21/21": [5878], "2/22/21": [5990], "2/23/21": [6018], "2/24/21": [6033], "2/25/21": [6050], "2/26/21": [6076], "2/27/21": [6076], "2/28/21": [6076], "3/1/21": [6098], "3/2/21": [6098], "3/3/21": [6159], "3/4/21": [6162], "3/5/21": [6174], "3/6/21": [6174], "3/7/21": [6174], "3/8/21": [6186], "3/9/21": [6219], "3/10/21": [6219], "3/11/21": [6226], "3/12/21": [6242], "3/13/21": [6242], "3/14/21": [6242], "3/15/21": [6253], "3/16/21": [6285], "3/17/21": [6295], "3/18/21": [6302], "3/19/21": [6307], "3/20/21": [6307], "3/21/21": [6307], "3/22/21": [6312], "3/23/21": [6356], "3/24/21": [6365], "3/25/21": [6381], "3/26/21": [6396], "3/27/21": [6396], "3/28/21": [6396], "3/29/21": [6421], "3/30/21": [6486], "3/31/21": [6495], "4/1/21": [6505], "4/2/21": [6523], "4/3/21": 
[6523], "4/4/21": [6523], "4/5/21": [6535], "4/6/21": [6566], "4/7/21": [6582], "4/8/21": [6588], "4/9/21": [6603], "4/10/21": [6603], "4/11/21": [6603], "4/12/21": [6609], "4/13/21": [6647], "4/14/21": [6675], "4/15/21": [6689], "4/16/21": [6691], "4/17/21": [6691], "4/18/21": [6691], "4/19/21": [6692], "4/20/21": [6710], "4/21/21": [6731], "4/22/21": [6763], "4/23/21": [6773], "4/24/21": [6773], "4/25/21": [6773], "4/26/21": [6774], "4/27/21": [6843], "4/28/21": [6843 + 88], "4/29/21": [6843 + 88 + 12], "4/30/21": [6843 + 88 + 12 + 11], "5/1/21": [6954], "5/2/21": [6954], "5/3/21": [6954 + 21], "5/4/21": [6954 + 21 + 29], "5/5/21": [6954 + 21 + 29 + 3], "5/6/21": [6954 + 21 + 29 + 3 + 4], "5/7/21": [6954 + 21 + 29 + 3 + 4 + 6], "5/8/21": [7017], "5/9/21": [7017], "5/10/21": [7017 + 5], "5/11/21": [7017 + 5 + 23], "5/12/21": [7017 + 5 + 23 + 13], "5/13/21": [7017 + 5 + 23 + 13 + 8], "5/14/21": [7017 + 5 + 23 + 13 + 8 + 11], "5/15/21": [7077], "5/16/21": [7077], "5/17/21": [7077 + 9], "5/18/21": [7077 + 9 + 21], "5/19/21": [7077 + 9 + 21 + 13], "5/20/21": [7077 + 9 + 21 + 13 + 2], "5/21/21": [7077 + 9 + 21 + 13 + 2 + 2], "5/22/21": [7124], "5/23/21": [7124], "5/24/21": [7124 + 12], "5/25/21": [7124 + 12 + 14], "5/26/21": [7124 + 12 + 14 + 5], "5/27/21": [7124 + 12 + 14 + 5 + 6], "5/28/21": [7124 + 12 + 14 + 5 + 6 + 3], "5/29/21": [7164], "5/30/21": [7164], "5/31/21": [7164], "6/1/21": [7164 + 3], "6/2/21": [7164 + 3 + 14], "6/3/21": [7164 + 3 + 14 + 7], "6/4/21": [7164 + 3 + 14 + 7 + 1], "6/5/21": [7189], "6/6/21": [7189], "6/7/21": [7189 + 3], "6/8/21": [7189 + 3 + 7], "6/9/21": [7189 + 3 + 7 + 0], "6/10/21": [7189 + 3 + 7 + 0 + 2], "6/11/21": [7201], "6/12/21": [7201], "6/13/21": [7201], "6/14/21": [7201 + 9], "6/15/21": [7201 + 9 + 11], "6/16/21": [7201 + 9 + 11 + 1], "6/17/21": [7201 + 9 + 11 + 1 + 3], "6/18/21": [7201 + 9 + 11 + 1 + 3 + 1], "6/19/21": [7226], "6/20/21": [7226], "6/21/21": [7226 + 0], "6/22/21": [7226 + 0 + 9], "6/23/21": [7226 + 0 + 9 + 0], "6/24/21": [7226 + 0 + 9 + 0 + 2], "6/25/21": [7226 + 0 + 9 + 0 + 2 + 1], "6/26/21": [7238], "6/27/21": [7238], "6/28/21": [7238 + 0], "6/29/21": [7238 + 0 + 10], "6/30/21": [7238 + 0 + 10 + 10], "7/1/21": [7238 + 0 + 10 + 10 + 1], "7/2/21": [7238 + 0 + 10 + 10 + 1 + 3], "7/3/21": [7262], "7/4/21": [7262], "7/5/21": [7262], # no longer updated after 7/5/2021 } ) import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session def clear_working_directory(): for a, b, c in os.walk("/kaggle/working"): for f in c: if f[0] != "_": target = os.path.join(a, f) os.remove(target) # plotting imports pd.plotting.register_matplotlib_converters() import matplotlib.pyplot as plt import seaborn as sns # display settings pd.options.display.width = 1200 pd.set_option("display.max_columns", 20) pd.set_option("display.max_rows", 100) # download the data # clear_working_directory() today_datetime = datetime.datetime.today() today = str(today_datetime).split(" ")[0] target = "/kaggle/working/JHU_TS_US_confirmed_" + today + ".csv" if not os.path.isfile(target): # if we haven't already downloaded the data today... 
urllib.request.urlretrieve( "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv", target, ) target = "/kaggle/working/JHU_TS_US_deaths_" + today + ".csv" if not os.path.isfile(target): urllib.request.urlretrieve( "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv", target, ) target = "/kaggle/working/TX_HHS_" + today + ".xlsx" if not os.path.isfile(target): urllib.request.urlretrieve( "https://dshs.texas.gov/coronavirus/TexasCOVID19CaseCountData.xlsx", target ) target = "/kaggle/working/TX_HHS_cumulative_tests_county_" + today + ".xlsx" if not os.path.isfile(target): urllib.request.urlretrieve( "https://dshs.texas.gov/coronavirus/TexasCOVID-19CumulativeTestsOverTimebyCounty.xlsx", target, ) def datetime_to_JHU_date(d): """d an object that has the form of datetime.datetime.today()""" s = str(d).split(" ")[0] s = s.split("-") s = "-".join([s[1], s[2], s[0]]) return s target = "/kaggle/working/JHU_daily_us_most_recent.csv" JHU_datetime = today_datetime if not os.path.isfile(target): done, attempts_remaining = False, 10 while (not done) and (attempts_remaining > 0): JHU_datestr = datetime_to_JHU_date(JHU_datetime) try: urllib.request.urlretrieve( "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/" + JHU_datestr + ".csv", target, ) # fourteen_days_ago = date_JHU - datetime.timedelta(days=14) #Actually, I don't need this yet # urllib.request.urlretrieve('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/'+'....'+'.csv', target) done = True except: JHU_datetime = JHU_datetime - datetime.timedelta(days=1) attempts_remaining -= 1 if attempts_remaining == 0: print( "Warning: JHU daily_reports_us has not been updated in over 8 days. Many graphing attempts below will fail." 
) target = "/kaggle/working/JHU_daily_us_7_days_ago.csv" if not os.path.isfile(target): JHU_datestr_minus_7 = datetime_to_JHU_date( JHU_datetime - datetime.timedelta(days=7) ) urllib.request.urlretrieve( "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/" + JHU_datestr_minus_7 + ".csv", target, ) target = "/kaggle/working/JHU_daily_us_14_days_ago.csv" if not os.path.isfile(target): JHU_datestr_minus_14 = datetime_to_JHU_date( JHU_datetime - datetime.timedelta(days=14) ) urllib.request.urlretrieve( "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/" + JHU_datestr_minus_14 + ".csv", target, ) target = "/kaggle/working/JHU_daily_us_21_days_ago.csv" if not os.path.isfile(target): JHU_datestr_minus_21 = datetime_to_JHU_date( JHU_datetime - datetime.timedelta(days=21) ) urllib.request.urlretrieve( "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/" + JHU_datestr_minus_21 + ".csv", target, ) target = "/kaggle/working/JHU_daily_us_28_days_ago.csv" if not os.path.isfile(target): JHU_datestr_minus_28 = datetime_to_JHU_date( JHU_datetime - datetime.timedelta(days=28) ) urllib.request.urlretrieve( "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/" + JHU_datestr_minus_28 + ".csv", target, ) # Read in the data df_confirmed = pd.read_csv("/kaggle/working/JHU_TS_US_confirmed_" + today + ".csv") df_deaths = pd.read_csv("/kaggle/working/JHU_TS_US_deaths_" + today + ".csv") df_TX_tests = pd.read_excel( "/kaggle/working/TX_HHS_cumulative_tests_county_" + today + ".xlsx", skiprows=[0] ) # get first (only) sheet, skip first row try: df_TX_pos_rate = pd.read_excel( "/kaggle/working/TX_HHS_" + today + ".xlsx", sheet_name="Molecular Positivity Rate", skiprows=[], ) _ = df_TX_pos_rate["Molecular Positivity Rate (Previous 7 Days)"] except: df_TX_pos_rate = pd.read_excel( "/kaggle/working/TX_HHS_" + today + ".xlsx", sheet_name="Molecular Positivity Rate", skiprows=[0], ) _ = df_TX_pos_rate["Molecular Positivity Rate (Previous 7 Days)"] df_TX_pos_rate = df_TX_pos_rate.drop(df_TX_pos_rate.tail(2).index) try: df_TX_hospitalizations = pd.read_excel( "/kaggle/working/TX_HHS_" + today + ".xlsx", sheet_name="Hospitalization by Day", skiprows=[0, 1], ) # df_TX_hospitalizations.drop('Obs', axis=1, inplace=True) except: df_TX_hospitalizations = pd.read_excel( "/kaggle/working/TX_HHS_" + today + ".xlsx", sheet_name="Hospitalization by Day", skiprows=[0], ) # df_TX_hospitalizations.drop('Obs', axis=1, inplace=True) df_JHU_daily_us = pd.read_csv("/kaggle/working/JHU_daily_us_most_recent.csv") df_JHU_daily_us_m7 = pd.read_csv("/kaggle/working/JHU_daily_us_7_days_ago.csv") df_JHU_daily_us_m14 = pd.read_csv("/kaggle/working/JHU_daily_us_14_days_ago.csv") df_JHU_daily_us_m21 = pd.read_csv("/kaggle/working/JHU_daily_us_21_days_ago.csv") df_JHU_daily_us_m28 = pd.read_csv("/kaggle/working/JHU_daily_us_28_days_ago.csv") for k, df in enumerate( [ df_JHU_daily_us, df_JHU_daily_us_m7, df_JHU_daily_us_m14, df_JHU_daily_us_m21, df_JHU_daily_us_m28, ] ): to_drop_list = df["Country_Region"] != "US" to_drop = [] for i in range(len(df)): if to_drop_list[i]: to_drop.append(i) if len(to_drop) > 0: print( "Warning: Extra data in JHU daily data index " + str(k) + ". 
(This is an error on their end that we have corrected for.)" ) df.drop(to_drop, axis=0, inplace=True) first_date_col_index = 12 # This is the first col index for the dates in the time series for df_deaths and, after the next couple of lines have run, for df_confirmed. Update this if the dataset format changes. # Make the columns for df_confirmed and df_deaths consistent. if "Population" not in df_confirmed.columns: df_confirmed.insert(first_date_col_index - 1, "Population", df_deaths["Population"]) # Do some data validation. for col in zip(df_deaths.columns, df_confirmed.columns): if not col[0] == col[1]: print("Problem here", col) if df_deaths.columns[first_date_col_index - 1] != "Population": print("Problem: Population column is not correct in df_deaths.") if df_confirmed.columns[first_date_col_index - 1] != "Population": print("Problem: Population column is not correct in df_confirmed.") if df_deaths.columns[first_date_col_index] != "1/22/20": print("First date is not 1/22/20 in df_deaths.") if df_confirmed.columns[first_date_col_index] != "1/22/20": print("First date is not 1/22/20 in df_confirmed.") if "April 21" not in df_TX_tests.columns[1]: print("First date in df_TX_tests is not April 21") # regularize the dates in df_TX_tests to be the same as those in df_deaths and df_confirmed start_cols = df_TX_tests.columns.tolist()[1:] cols = df_confirmed.columns.tolist() cols = cols[cols.index("4/21/20") : len(cols)] assert len(cols) >= len(start_cols) renamer = dict(zip(start_cols, cols)) df_TX_tests = df_TX_tests.rename(columns=renamer) # fill na values df_TX_tests.replace("-", np.nan, inplace=True) df_TX_tests.replace("--", np.nan, inplace=True) df_TX_tests = df_TX_tests.fillna(method="ffill", axis=1) df_TX_pos_rate.replace(".", np.nan, inplace=True) df_TX_pos_rate.replace("-", np.nan, inplace=True) df_TX_pos_rate.replace("--", np.nan, inplace=True) # df_TX_pos_rate[['COVID-19\nPositivity\nRate' ,'New Viral Tests Reported* (Average of previous 7 days)']] = df_TX_pos_rate[['COVID-19\nPositivity\nRate' ,'New Viral Tests Reported* (Average of previous 7 days)']].bfill() # data exploration pt = 0 if pt: print(df_confirmed.head(90), "\n") print(df_deaths.head(5), "\n") for i, col in enumerate(df_confirmed.columns): print(i, col) if i > 12: print("...") break # define processing functions and constants dates = df_confirmed.columns[first_date_col_index:] day_MA = 7 day_MA_2 = 14 def time_series(df, states="all", counties="all"): """states, counties are "all" or a list""" if states == "all": this_slice = df else: this_slice = df.loc[ df.Province_State.isin(states), : ] # first get all columns for these states if counties == "all": this_slice = this_slice else: this_slice = this_slice.loc[ df.Admin2.isin(counties), : ] # then get all the columns for these counties this_slice = this_slice.iloc[ :, first_date_col_index: ] # next, get all the day-by-day rows (1/22/2020 - present) TS = this_slice.sum(axis=0) # finally, sum these rows TS = pd.Series(TS, name="thing") return TS def get_population(states="all", counties="all"): df = df_confirmed """states, counties are "all" or a list""" if states == "all": this_slice = df else: this_slice = df.loc[ df.Province_State.isin(states), : ] # first get all columns for these states if counties == "all": this_slice = this_slice else: this_slice = this_slice.loc[ df.Admin2.isin(counties), : ] # then get all the columns for these counties this_slice = this_slice["Population"] return this_slice.sum() def daily_change_in_time_series(TS): res = 
TS.rolling(window=2).apply(lambda x: x.iloc[1] - x.iloc[0]) res.iloc[0] = TS.iloc[0] return res def trailing_moving_average(TS, n=7): TS = pd.Series(TS) return TS.rolling(n).mean() def single_area_graph( df, title, states, counties, difference=None, interactive=False, y_range_override=None, ): ignore_left_points = 39 # initial number of datapoints not plotted (but still computed). Set to 39 to start the graphs at 3/1/2020 TS = time_series(df, states=states, counties=counties) if difference is not None: TS_2 = time_series( df, states=difference["states"], counties=difference["counties"] ) TS = TS - TS_2 TS_daily = daily_change_in_time_series(TS) MA = trailing_moving_average(TS_daily, day_MA) MA_2 = trailing_moving_average(TS_daily, day_MA_2) what = MA trace1 = go.Scatter( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="lines", name="{}-day MA".format(day_MA), marker={"color": "red"}, ) what = TS_daily trace2 = go.Scatter( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="markers", name="daily val", marker={"color": "blue", "size": 4, "opacity": 0.5}, ) what = MA_2 trace3 = go.Scatter( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="lines", name="{}-day MA".format(day_MA_2), marker={"color": "rgba(0,160,0,0.7)"}, ) data = [trace1, trace3, trace2] legend = {"orientation": "v", "x": 0, "y": -0.35} if y_range_override is not None: y_range = y_range_override else: y_range = 1.2 * MA.values[ignore_left_points:].max() layout = { "title": {"text": title, "x": 0}, "xaxis": {"title": "date"}, "yaxis": {"range": [0, y_range]}, "margin": {"l": 0, "r": 0, "t": 50}, "legend": legend, } fig = {"data": data, "layout": layout} if interactive: iplot(fig, config={"displayModeBar": False}) else: fig = go.Figure(data=data, layout=layout) config = dict(displayModeBar=False) fig.show(config=config, renderer="png", width=800) # IMPORTANT NOTE: Beginning around August 1, most states started reporting far fewer daily tests than they did during the month of July. # For example, during the last week of July, Texas reported an average of 62546 daily tests, but during the first week of August, reported an average of only 47254 daily tests. # Therefore, a decreasing number of daily new confirmed cases in the graphs below may correspond more to a decrease in the number of tests performed than a decrease in the true number of new infections. 
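# Aside: one way to sanity-check the testing-volume caveat above is to
# normalise new cases by new tests, e.g. cases per 1,000 new tests, before
# reading too much into a falling case curve. The series names in the usage
# comment are hypothetical placeholders, not variables defined in this notebook:
def cases_per_1000_tests(daily_new_cases, daily_new_tests):
    """Both arguments are pandas Series aligned on date; returns a Series."""
    return 1000.0 * daily_new_cases / daily_new_tests.clip(lower=1)


# e.g. trailing_moving_average(cases_per_1000_tests(tx_new_cases, tx_new_tests), 7)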
# make graphs single_area_graph( df_confirmed, "USA daily new confirmed cases", states="all", counties="all" ) single_area_graph( df_confirmed, "USA minus NY and NJ daily new confirmed cases", states="all", counties="all", difference={"states": ["New York", "New Jersey"], "counties": "all"}, ) single_area_graph( df_confirmed, "TX daily new confirmed cases", states=["Texas"], counties="all" ) ignore_left_points = 50 data = [] today_data = [] colorwheel = 3 for state in all_states: TS = time_series(df_confirmed, states=[state], counties="all") TS_daily = daily_change_in_time_series(TS) MA = trailing_moving_average(TS_daily, day_MA) MA_2 = trailing_moving_average(TS_daily, day_MA_2) what = MA if state == "Texas": color = "red" opacity = 1 elif state == "New York": color = "black" opacity = 0.5 elif state == "Arizona": color = "DarkOrange" opacity = 0.75 elif state == "Florida": color = "rgb(50,100,50)" opacity = 0.5 elif state == "Nevada": color = "DarkBlue" opacity = 0.5 else: color = "hsl({},50%,30%)".format(colorwheel) # color = 'hsl(100,90%,30%)' colorwheel += 360 / 50 opacity = 0.1 what = ( what / get_population(states=[state], counties="all") * 100000 ) # switch to new cases per 100,000 trace = go.Scatter( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="lines", opacity=opacity, marker={"color": color}, name=state, ) data.append(trace) today_data.append(what.values[-1]) today_data = pd.DataFrame( index=all_states, data={ "Daily new cases per 100,000 residents, average of last 7 days (lower is better)": today_data }, ) # legend = {'orientation':'v', 'x':0, 'y':-0.35} legend = {} layout = { "title": { "text": "Daily new cases per 100,000 residents, 7-day moving average (Texas in red)", "x": 0, }, "legend": legend, "margin": {"l": 0, "r": 0, "t": 50}, "showlegend": True, } fig = go.Figure({"data": data, "layout": layout}) fig.show() today_data = today_data.sort_values( "Daily new cases per 100,000 residents, average of last 7 days (lower is better)", ascending=False, ) today_data.head(50) # graph new confirmed cases by state for all 50 states + DC ignore_left_points = 39 # initial number of datapoints not plotted (but still computed). 
Set to 39 to start the graphs at 3/1/2020 df = df_confirmed fig = make_subplots( rows=17, cols=3, subplot_titles=all_states, shared_xaxes=True, vertical_spacing=0.01 ) for i_state, state in enumerate(all_states): states = [state] counties = "all" TS = time_series(df, states=states, counties=counties) TS_daily = daily_change_in_time_series(TS) MA = trailing_moving_average(TS_daily, day_MA) MA_2 = trailing_moving_average(TS_daily, day_MA_2) row, col = divmod(i_state, 3) row += 1 col += 1 what = MA fig.add_trace( go.Scattergl( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="lines", marker={"color": "red"}, name="{}-day MA".format(day_MA), ), row=row, col=col, ) what = TS_daily fig.add_trace( go.Scattergl( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="markers", marker={"color": "blue", "size": 2, "opacity": 0.5}, name="daily val", ), row=row, col=col, ) what = MA_2 fig.add_trace( go.Scattergl( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="lines", marker={"color": "rgba(0,160,0,0.7)"}, name="{}-day MA".format(day_MA_2), ), row=row, col=col, ) fig.update_yaxes( range=[0, 1.2 * MA.values[ignore_left_points:].max()], row=row, col=col ) # if i_state>6: # break legend = {"orientation": "v", "x": 0, "y": -0.35} layout = { "title": {"text": "Daily new confirmed cases by state", "x": 0}, "height": 3000, "width": 1000, "showlegend": False, } #'xaxis':{'title':'date'}, 'yaxis':{'range':[0,1.2*MA.values[ignore_left_points:].max()]}, 'margin':{'l':0, 'r':0, 't':50}, } fig.update_layout(layout) fig.update_layout(margin={"l": 0, "r": 0}) # , row=row, col=col) config = dict(displayModeBar=False) for i in fig["layout"]["annotations"]: i["font"]["size"] = 12 # change title size for the subgraphs fig.show(config=config, renderer="png", width=800, height=2000) # fig = {'data':data, 'layout':layout} # iplot(fig, config={'displayModeBar':False}) # IMPORTANT NOTE: The following graph is created from TX HHS data. title = "TX active COVID-19 hospitalizations" trace1 = go.Scatter( x=df_TX_hospitalizations.Date, y=df_TX_hospitalizations.Hospitalizations, mode="lines+markers", name="Hopsitalizations", marker={"color": "red", "opacity": 0.5}, ) data = [trace1] legend = {"orientation": "v", "x": 0, "y": -0.35} layout = { "title": {"text": title, "x": 0}, "xaxis": {}, "yaxis": { "range": [0, 1.2 * df_TX_hospitalizations.Hospitalizations.max()], "title": "People hospitalized", }, "margin": {"l": 0, "r": 0, "t": 50}, "legend": legend, } fig = {"data": data, "layout": layout} fig = go.Figure(data=data, layout=layout) config = dict(displayModeBar=False) fig.show(config=config, renderer="png", width=800) if ( 1 ): # Why can't these ****ers keep their spreadsheet structure the same from day to day? 
title = "TX new molecular tests and percentage of new tests positive by specimen collection date (TX HHS data)" pd.Series.reverse = pd.DataFrame.reverse = lambda self: self[::-1] fig = make_subplots(specs=[[{"secondary_y": True}]]) # mind the reverse()'s fig.add_trace( go.Scattergl( x=df_TX_pos_rate["Specimen Collection Date"].reverse(), y=df_TX_pos_rate["Molecular Positivity Rate (Previous 7 Days)"].reverse(), name="% of new tests positive (7-day MA)", mode="lines+markers", marker={"opacity": 0.5}, ), secondary_y=False, ) # fig.add_trace( go.Scattergl(x = df_TX_pos_rate['Date'], y = df_TX_pos_rate['New Viral Tests Reported* (Average of previous 7 days)'], name = '# of new tests (7-day MA)', mode='lines+markers', marker={'opacity':0.5}), secondary_y = True ) # fig.add_trace( go.Scattergl(x = df_TX_pos_rate['Specimen Collection Date'], y = trailing_moving_average(df_TX_pos_rate['Test Results'].reverse()+df_TX_pos_rate['New Test Results'].reverse(), 7).reverse(), name = 'Avg # of daily tests in last 7 days', mode='lines+markers', marker={'opacity':0.5}), secondary_y = True ) fig.add_trace( go.Scattergl( x=df_TX_pos_rate["Specimen Collection Date"].reverse(), y=trailing_moving_average( df_TX_pos_rate["Test Results"] + df_TX_pos_rate["New Test Results"], 7 ).reverse(), name="Avg # of daily tests in last 7 days", mode="lines+markers", marker={"opacity": 0.5}, ), secondary_y=True, ) legend = {"orientation": "v", "x": 0, "y": -0.35} layout = { "title": {"text": title, "x": 0}, "margin": {"l": 0, "r": 0, "t": 50}, "legend": legend, } fig.update_layout(layout) config = dict(displayModeBar=False) fig.show( config=config, renderer="png", width=800, ) # examine time for number of cases to increase by number of active cases (which isn't the same thing as active cases doubling) # The following table displays an approximation of the doubling time for the number of active cases. Specifically, it shows the number of days it would take for the cumulative number of cases in each state to increase by the number of currently active cases in that state if the state's case growth rate (see graphs immediately above) does not change. This is not exactly the same as the doubling time for the number of active cases, because some currently-active cases will resolve in this time. # States with an accelerating number of cases will take less time than displayed, and cases with a decelerating number of cases will take more time. # It is possible for a state to see an accelerating number of cases due to an increase in testing. This table includes information on whether daily testing for each state is increasing, decreasing, or flat. # States with fewer than 1000 active cases are excluded from this table. if 0: # This code isn't elegant. Sorry. Also something in the data starting around 3/10/2021 is causing a division by 0 error, and I don't see the point in trying to fix it, since this table hasn't been very useful for a while now. 
df = df_JHU_daily_us[["Province_State", "Active"]] df = df.loc[df["Province_State"].isin(all_states), :] df_active = df.set_index("Province_State") accel_points_1 = [] most_recent_MA_day_case_increase = [] for state in all_states: TS = time_series(df_confirmed, states=[state], counties="all") TS_daily = daily_change_in_time_series(TS) MA = trailing_moving_average(TS_daily, day_MA) most_recent_MA_day_case_increase.append(MA[-1]) accel_points_1.append(MA[-14]) df_MA_newest_incr_rate = pd.DataFrame( index=all_states, data={"incr_rate": most_recent_MA_day_case_increase} ) df_MA_incr_rate_14_days_ago = pd.DataFrame( index=all_states, data={"incr_rate": accel_points_1} ) df1 = df_active["Active"] / df_MA_newest_incr_rate["incr_rate"] accel_points_2 = [] most_recent_MA_2_day_case_increase = [] for state in all_states: TS = time_series(df_confirmed, states=[state], counties="all") TS_daily = daily_change_in_time_series(TS) MA = trailing_moving_average(TS_daily, day_MA_2) most_recent_MA_2_day_case_increase.append(MA[-1]) accel_points_2.append(MA[-14]) df_MA_2_newest_incr_rate = pd.DataFrame( index=all_states, data={"incr_rate": most_recent_MA_2_day_case_increase} ) df_MA_2_incr_rate_14_days_ago = pd.DataFrame( index=all_states, data={"incr_rate": accel_points_2} ) df2 = df_active["Active"] / df_MA_2_newest_incr_rate["incr_rate"] r0, r1, rr0, rr1 = ( df_MA_incr_rate_14_days_ago["incr_rate"], df_MA_newest_incr_rate["incr_rate"], df_MA_2_incr_rate_14_days_ago["incr_rate"], df_MA_2_newest_incr_rate["incr_rate"], ) is_accel = (1.1 * r0 < r1) & (1.1 * rr0 < rr1) # vectorized op is_decel = (1.1 * r1 < r0) & (1.1 * rr1 < rr0) is_neither = (~is_accel) & (~is_decel) df_JHU_daily_us.rename( {"Total_Test_Results": "People_Tested"}, axis="columns", inplace=True ) # Data format change on 11/12/2020 df_JHU_daily_us_m7.rename( {"Total_Test_Results": "People_Tested"}, axis="columns", inplace=True ) # Data format change on 11/12/2020 df_JHU_daily_us_m14.rename( {"Total_Test_Results": "People_Tested"}, axis="columns", inplace=True ) # Data format change on 11/12/2020 df_JHU_daily_us_m21.rename( {"Total_Test_Results": "People_Tested"}, axis="columns", inplace=True ) # Data format change on 11/12/2020 df_JHU_daily_us_m28.rename( {"Total_Test_Results": "People_Tested"}, axis="columns", inplace=True ) # Data format change on 11/12/2020 def compute_tests_incr_or_decr(return_rate=True): df_daily_most_recent = df_JHU_daily_us[ ["Province_State", "People_Tested"] ].copy() df_daily_m7 = df_JHU_daily_us_m7[["Province_State", "People_Tested"]].copy() df_daily_m14 = df_JHU_daily_us_m14[["Province_State", "People_Tested"]].copy() df_daily_m21 = df_JHU_daily_us_m21[["Province_State", "People_Tested"]].copy() df_daily_m28 = df_JHU_daily_us_m28[["Province_State", "People_Tested"]].copy() for df in [ df_daily_most_recent, df_daily_m7, df_daily_m14, df_daily_m21, df_daily_m28, ]: df.set_index("Province_State", inplace=True) df_MR_14_avg_tests = (df_daily_most_recent - df_daily_m14) / 14 df_MR_7_avg_tests = (df_daily_most_recent - df_daily_m7) / 7 df_7_days_ago_7_avg_tests = (df_daily_m7 - df_daily_m14) / 7 df_7_days_ago_14_avg_tests = (df_daily_m7 - df_daily_m21) / 14 df_14_days_ago_14_avg_tests = (df_daily_m14 - df_daily_m28) / 14 # To be increasing, we should be doing at least 3% more tests now than a week ago, and 10% now than two weeks ago. 
tests_incr = ( df_MR_7_avg_tests["People_Tested"] > 1.03 * df_7_days_ago_7_avg_tests["People_Tested"] ) & ( df_MR_14_avg_tests["People_Tested"] > 1.10 * df_14_days_ago_14_avg_tests["People_Tested"] ) tests_decr = ( df_MR_7_avg_tests["People_Tested"] < 0.97 * df_7_days_ago_7_avg_tests["People_Tested"] ) & ( df_MR_14_avg_tests["People_Tested"] < 1 * df_14_days_ago_14_avg_tests["People_Tested"] ) tests_neither = (~tests_incr) & (~tests_decr) to_return = [tests_incr, tests_decr, tests_neither] if return_rate: to_return.append( ( (df_MR_14_avg_tests / df_7_days_ago_14_avg_tests - 1) * 100 + (df_MR_14_avg_tests / df_14_days_ago_14_avg_tests - 1) * 100 ) / 2 ) # average the testing increase rates measured two different ways return to_return ( tests_incr, tests_decr, tests_neither, df_tests_incr_rate, ) = compute_tests_incr_or_decr() df_min = df1.combine(df2, min) df_min = df_min.rename("min") df_max = df1.combine(df2, max) df_max = df_max.rename("max") df = pd.DataFrame(df_min) # df['Tests are up by...'] = df_tests_incr_rate df["max"] = df_max df = df.loc[ df_active["Active"] >= 1000, : ].copy() # exclude states with <1000 active cases df = df.sort_values("min") df["Days until # cases increases by # active cases (larger is better)"] = df[ "min" ].combine(df["max"], lambda x, y: "{:.2f} to {:.2f}".format(x, y)) df.drop(["max", "min"], axis=1, inplace=True) df["Number of active cases"] = df_active["Active"].astype(int) df["Cases accelerating?"] = is_accel.apply( lambda x: "Accelerating" if x is True else None ) df.loc[is_decel, "Cases accelerating?"] = "Decelerating" df.loc[is_neither, "Cases accelerating?"] = "Neither / Unclear" df["Tests increasing?"] = tests_incr.apply( lambda x: "Increasing" if x is True else None ) df.loc[tests_decr, "Tests increasing?"] = "Decreasing" df.loc[ tests_neither, "Tests increasing?" ] = "Neither clearly increasing nor clearly decreasing" def color_cells(val): if val in ["Accelerating", "Decreasing"]: color = "red" elif val in ["Decelerating", "Increasing"]: color = "green" else: color = None return "color: %s" % color df = df.reset_index() df = df.rename( columns={ "Province_State": "State", "Cases accelerating?": "Cases are...", "Tests increasing?": "Daily tests are...", } ) # df = df.drop(columns = ['Tests are...'], axis = 1) df = df.set_index("State") df.head(51).style.applymap(color_cells) # figure out how to describe this, then show the output # As tests rise, so do the number of cases. If cases are rising faster than tests, that means the percentage of tests that are positive is increasing. When this happens, this is very bad. At minimum, as we expand testing we should hope that the number of cases grows no more rapildy than the number of tests, and ideally, we would like to see the number of tests growing more rapidly than the number of cases. 
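# A worked sketch, on made-up numbers, of the approximation behind the (currently disabled)
# table above: the number of days until cumulative cases grow by the number of active cases
# is approximated as active cases divided by the current daily-new-case moving average.
# The "example_" values below are illustrative only.
example_active_cases = 50_000
example_ma7_new = 4_000  # 7-day moving average of daily new cases, illustration only
example_ma14_new = 3_500  # 14-day moving average of daily new cases, illustration only

example_days_7 = example_active_cases / example_ma7_new  # 12.5 days
example_days_14 = example_active_cases / example_ma14_new  # ~14.3 days
example_low, example_high = sorted([example_days_7, example_days_14])
print(
    "Days until # cases increases by # active cases: {:.2f} to {:.2f}".format(
        example_low, example_high
    )
)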
# Compute 7 and 14 day percentage increase in tests and cases df_JHU_daily_us.rename( {"Total_Test_Results": "People_Tested"}, axis="columns", inplace=True ) # Data format change on 11/12/2020 df_JHU_daily_us_m7.rename( {"Total_Test_Results": "People_Tested"}, axis="columns", inplace=True ) # Data format change on 11/12/2020 df_JHU_daily_us_m14.rename( {"Total_Test_Results": "People_Tested"}, axis="columns", inplace=True ) # Data format change on 11/12/2020 df_JHU_daily_us_m21.rename( {"Total_Test_Results": "People_Tested"}, axis="columns", inplace=True ) # Data format change on 11/12/2020 df_JHU_daily_us_m28.rename( {"Total_Test_Results": "People_Tested"}, axis="columns", inplace=True ) # Data format change on 11/12/2020 df_tests_cases = df_JHU_daily_us[["Province_State", "People_Tested", "Confirmed"]] df_tests_cases_m7 = df_JHU_daily_us_m7[["Province_State", "People_Tested", "Confirmed"]] df_tests_cases_m14 = df_JHU_daily_us_m14[ ["Province_State", "People_Tested", "Confirmed"] ] for df_TC in [df_tests_cases, df_tests_cases_m7, df_tests_cases_m14]: df_TC.set_index("Province_State", inplace=True) df_tests_cases_incr_14_days = ((df_tests_cases / df_tests_cases_m14) - 1) * 100 df_tests_cases_incr_7_days = ((df_tests_cases / df_tests_cases_m7) - 1) * 100 df_tests_cases_incr_7_days[ "Total test growth (last 7 days)" ] = df_tests_cases_incr_7_days["People_Tested"].apply(lambda x: "{:.1f}%".format(x)) df_tests_cases_incr_7_days[ "Total case growth (last 7 days)" ] = df_tests_cases_incr_7_days["Confirmed"].apply(lambda x: "{:.1f}%".format(x)) df_tests_cases_incr_14_days[ "Total test growth (last 14 days)" ] = df_tests_cases_incr_14_days["People_Tested"].apply(lambda x: "{:.1f}%".format(x)) df_tests_cases_incr_14_days[ "Total case growth (last 14 days)" ] = df_tests_cases_incr_14_days["Confirmed"].apply(lambda x: "{:.1f}%".format(x)) df = df_tests_cases_incr_7_days[ ["Total case growth (last 7 days)", "Total test growth (last 7 days)"] ].join( df_tests_cases_incr_14_days[ ["Total case growth (last 14 days)", "Total test growth (last 14 days)"] ] ) df = df.reset_index() df = df.loc[df["Province_State"].isin(all_states), :] df = df.rename(columns={"Province_State": "State"}) df = df.set_index("State") df.head(51) # Note: As of July 5, 2021, Walker County no longer provides a breakdown of COVID cases into free population vs. imprisoned population. The following graph includes the full population of Walker county. # legacy code: not doing this any more # add Walker Free info to df_confirmed. Only add columns that are already in df_confirmed. (We only want to graph dates where we have all the info for everywhere.) 
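# A toy illustration (made-up numbers, not real data) of the growth computation above:
# two cumulative snapshots, today and 7 days ago, give percentage growth over the window.
# If "Confirmed" grows faster than "People_Tested", the positivity rate is rising.
import pandas as pd

example_today = pd.DataFrame(
    {
        "Province_State": ["Texas", "Nevada"],
        "People_Tested": [3_200_000, 600_000],
        "Confirmed": [420_000, 60_000],
    }
).set_index("Province_State")
example_week_ago = pd.DataFrame(
    {
        "Province_State": ["Texas", "Nevada"],
        "People_Tested": [3_000_000, 550_000],
        "Confirmed": [400_000, 55_000],
    }
).set_index("Province_State")
example_growth_7d = ((example_today / example_week_ago) - 1) * 100
print(example_growth_7d.round(1))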
# for col in df_confirmed.columns.values: # if col not in df_Walker_TX_free: # df_Walker_TX_free[col] = np.nan # for col in df_Walker_TX_free.columns.values: # if col not in df_confirmed: # df_Walker_TX_free.drop(col, axis=1, inplace=True) # if sum(df_confirmed['Admin2']=='Walker Free') == 0: #if we haven't added Walker Free info yet # df_confirmed = df_confirmed.append(df_Walker_TX_free, ignore_index = True) # single_area_graph(df_confirmed, 'Walker County, TX daily new confirmed cases', states=['Texas'], counties=['Walker']) single_area_graph( df_confirmed, "Walker County, TX daily new confirmed cases", states=["Texas"], counties=["Walker"], y_range_override=50, ) single_area_graph( df_confirmed, "Walker County, TX and surrounding areas daily new confirmed cases", states=["Texas"], counties=[ "Walker", "Harris", "Montgomery", "Grimes", "Brazos", "San Jacinto", "Trinity", "Houston", "Madison", ], ) single_area_graph( df_confirmed, "Harris County, TX daily new confirmed cases", states=["Texas"], counties=["Harris"], ) single_area_graph( df_confirmed, "Montgomery County, TX daily new confirmed cases", states=["Texas"], counties=["Montgomery"], ) single_area_graph( df_confirmed, "Nueces County, TX daily new confirmed cases", states=["Texas"], counties=["Nueces"], ) single_area_graph( df_confirmed, "San Patricio County, TX daily new confirmed cases", states=["Texas"], counties=["San Patricio"], ) single_area_graph( df_confirmed, "Douglas County, NV daily new confirmed cases", states=["Nevada"], counties=["Douglas"], ) single_area_graph( df_confirmed, "El Dorado County, CA daily new confirmed cases", states=["California"], counties=["El Dorado"], ) single_area_graph( df_confirmed, "Placer County, CA daily new confirmed cases", states=["California"], counties=["Placer"], ) single_area_graph( df_confirmed, "Sacramento County, CA daily new confirmed cases", states=["California"], counties=["Sacramento"], ) single_area_graph( df_confirmed, "Fulton County, GA daily new confirmed cases", states=["Georgia"], counties=["Fulton"], ) single_area_graph( df_confirmed, "Yavapai County, AZ daily new confirmed cases", states=["Arizona"], counties=["Yavapai"], ) single_area_graph( df_confirmed, "Maricopa County, AZ daily new confirmed cases", states=["Arizona"], counties=["Maricopa"], ) single_area_graph( df_confirmed, "Sedgwick County, KS daily new confirmed cases", states=["Kansas"], counties=["Sedgwick"], ) single_area_graph( df_confirmed, "Clark County, NV daily new confirmed cases", states=["Nevada"], counties=["Clark"], ) single_area_graph( df_confirmed, "Miami-Dade County, FL daily new confirmed cases", states=["Florida"], counties=["Miami-Dade"], ) single_area_graph(df_deaths, "USA daily deaths", states="all", counties="all") single_area_graph( df_deaths, "USA minus NY and NJ daily deaths", states="all", counties="all", difference={"states": ["New York", "New Jersey"], "counties": "all"}, ) single_area_graph(df_deaths, "TX daily deaths", states=["Texas"], counties="all") ignore_left_points = 39 # initial number of datapoints not plotted (but still computed). 
Set to 39 to start the graphs at 3/1/2020 df = df_deaths fig = make_subplots( rows=17, cols=3, subplot_titles=all_states, shared_xaxes=True, vertical_spacing=0.01 ) for i_state, state in enumerate(all_states): states = [state] counties = "all" TS = time_series(df, states=states, counties=counties) TS_daily = daily_change_in_time_series(TS) MA = trailing_moving_average(TS_daily, day_MA) MA_2 = trailing_moving_average(TS_daily, day_MA_2) row, col = divmod(i_state, 3) row += 1 col += 1 what = MA fig.add_trace( go.Scattergl( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="lines", marker={"color": "red"}, name="{}-day MA".format(day_MA), ), row=row, col=col, ) what = TS_daily fig.add_trace( go.Scattergl( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="markers", marker={"color": "blue", "size": 2, "opacity": 0.5}, name="daily val", ), row=row, col=col, ) what = MA_2 fig.add_trace( go.Scattergl( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="lines", marker={"color": "rgba(0,160,0,0.7)"}, name="{}-day MA".format(day_MA_2), ), row=row, col=col, ) fig.update_yaxes( range=[0, 1.2 * MA.values[ignore_left_points:].max()], row=row, col=col ) # if i_state>6: # break legend = {"orientation": "v", "x": 0, "y": -0.35} layout = { "title": {"text": "Daily deaths by state", "x": 0}, "height": 3000, "width": 1000, "showlegend": False, } #'xaxis':{'title':'date'}, 'yaxis':{'range':[0,1.2*MA.values[ignore_left_points:].max()]}, 'margin':{'l':0, 'r':0, 't':50}, } fig.update_layout(layout) fig.update_layout(margin={"l": 0, "r": 0}) # , row=row, col=col) config = dict(displayModeBar=False) for i in fig["layout"]["annotations"]: i["font"]["size"] = 12 # change title size for the subgraphs fig.show(config=config, renderer="png", width=800, height=2000) # development area df_tests_cases = df_JHU_daily_us[["Province_State", "People_Tested", "Confirmed"]] df_tests_cases_m7 = df_JHU_daily_us_m7[["Province_State", "People_Tested", "Confirmed"]] df_tests_cases_m14 = df_JHU_daily_us_m14[ ["Province_State", "People_Tested", "Confirmed"] ] for df_TC in [df_tests_cases, df_tests_cases_m7, df_tests_cases_m14]: df_TC.set_index("Province_State", inplace=True) df_tests_cases_incr_14_days = ((df_tests_cases / df_tests_cases_m14) - 1) * 100 df_tests_cases_incr_7_days = ((df_tests_cases / df_tests_cases_m7) - 1) * 100 df_tests_cases_incr_7_days["Tests increase (7 days)"] = df_tests_cases_incr_7_days[ "People_Tested" ].apply(lambda x: "{:.1f}%".format(x)) df_tests_cases_incr_7_days["Cases increase (7 days)"] = df_tests_cases_incr_7_days[ "Confirmed" ].apply(lambda x: "{:.1f}%".format(x)) df_tests_cases_incr_14_days["Tests increase (14 days)"] = df_tests_cases_incr_14_days[ "People_Tested" ].apply(lambda x: "{:.1f}%".format(x)) df_tests_cases_incr_14_days["Cases increase (14 days)"] = df_tests_cases_incr_14_days[ "Confirmed" ].apply(lambda x: "{:.1f}%".format(x)) # df_JHU_daily_us[['Province_State','Confirmed']] - df_JHU_daily_us_m14[['Province_State','Confirmed']] time_series(df_confirmed, states=["Texas"]) get_population(states=["Texas"]) # df_TX_pos_rate.head() # df_TX_pos_rate.columns # df_JHU_daily_us.columns df_TX_pos_rate df_TX_pos_rate["Specimen Collection Date"] df_TX_pos_rate["Test Results"] time_series(df=df_deaths, states=["Texas"], counties="all").tail(9) df_TX_hospitalizations["Hospitalizations"] # with pd.option_context('display.max_rows', None, 'display.max_columns', 25): # print(df_confirmed.head())
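# A minimal sketch of the subplot-grid bookkeeping used in the by-state panels above:
# divmod(i, n_cols) maps a flat index to a (row, col) cell, shifted by 1 because
# make_subplots uses 1-based positions. Titles and data here are illustrative only.
import plotly.graph_objs as go
from plotly.subplots import make_subplots

example_titles = ["A", "B", "C", "D", "E", "F"]
example_n_cols = 3
example_fig = make_subplots(rows=2, cols=example_n_cols, subplot_titles=example_titles)
for i, name in enumerate(example_titles):
    row, col = divmod(i, example_n_cols)
    example_fig.add_trace(
        go.Scatter(x=[0, 1, 2], y=[i, i + 1, i + 2], mode="lines", name=name),
        row=row + 1,
        col=col + 1,
    )
example_fig.update_layout(height=400, width=700, showlegend=False)
# example_fig.show(config=dict(displayModeBar=False))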
Set to 39 to start the graphs at 3/1/2020 df = df_deaths fig = make_subplots( rows=17, cols=3, subplot_titles=all_states, shared_xaxes=True, vertical_spacing=0.01 ) for i_state, state in enumerate(all_states): states = [state] counties = "all" TS = time_series(df, states=states, counties=counties) TS_daily = daily_change_in_time_series(TS) MA = trailing_moving_average(TS_daily, day_MA) MA_2 = trailing_moving_average(TS_daily, day_MA_2) row, col = divmod(i_state, 3) row += 1 col += 1 what = MA fig.add_trace( go.Scattergl( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="lines", marker={"color": "red"}, name="{}-day MA".format(day_MA), ), row=row, col=col, ) what = TS_daily fig.add_trace( go.Scattergl( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="markers", marker={"color": "blue", "size": 2, "opacity": 0.5}, name="daily val", ), row=row, col=col, ) what = MA_2 fig.add_trace( go.Scattergl( x=what.index[ignore_left_points:], y=what.values[ignore_left_points:], mode="lines", marker={"color": "rgba(0,160,0,0.7)"}, name="{}-day MA".format(day_MA_2), ), row=row, col=col, ) fig.update_yaxes( range=[0, 1.2 * MA.values[ignore_left_points:].max()], row=row, col=col ) # if i_state>6: # break legend = {"orientation": "v", "x": 0, "y": -0.35} layout = { "title": {"text": "Daily deaths by state", "x": 0}, "height": 3000, "width": 1000, "showlegend": False, } #'xaxis':{'title':'date'}, 'yaxis':{'range':[0,1.2*MA.values[ignore_left_points:].max()]}, 'margin':{'l':0, 'r':0, 't':50}, } fig.update_layout(layout) fig.update_layout(margin={"l": 0, "r": 0}) # , row=row, col=col) config = dict(displayModeBar=False) for i in fig["layout"]["annotations"]: i["font"]["size"] = 12 # change title size for the subgraphs fig.show(config=config, renderer="png", width=800, height=2000) # development area df_tests_cases = df_JHU_daily_us[["Province_State", "People_Tested", "Confirmed"]] df_tests_cases_m7 = df_JHU_daily_us_m7[["Province_State", "People_Tested", "Confirmed"]] df_tests_cases_m14 = df_JHU_daily_us_m14[ ["Province_State", "People_Tested", "Confirmed"] ] for df_TC in [df_tests_cases, df_tests_cases_m7, df_tests_cases_m14]: df_TC.set_index("Province_State", inplace=True) df_tests_cases_incr_14_days = ((df_tests_cases / df_tests_cases_m14) - 1) * 100 df_tests_cases_incr_7_days = ((df_tests_cases / df_tests_cases_m7) - 1) * 100 df_tests_cases_incr_7_days["Tests increase (7 days)"] = df_tests_cases_incr_7_days[ "People_Tested" ].apply(lambda x: "{:.1f}%".format(x)) df_tests_cases_incr_7_days["Cases increase (7 days)"] = df_tests_cases_incr_7_days[ "Confirmed" ].apply(lambda x: "{:.1f}%".format(x)) df_tests_cases_incr_14_days["Tests increase (14 days)"] = df_tests_cases_incr_14_days[ "People_Tested" ].apply(lambda x: "{:.1f}%".format(x)) df_tests_cases_incr_14_days["Cases increase (14 days)"] = df_tests_cases_incr_14_days[ "Confirmed" ].apply(lambda x: "{:.1f}%".format(x)) # df_JHU_daily_us[['Province_State','Confirmed']] - df_JHU_daily_us_m14[['Province_State','Confirmed']] time_series(df_confirmed, states=["Texas"]) get_population(states=["Texas"]) # df_TX_pos_rate.head() # df_TX_pos_rate.columns # df_JHU_daily_us.columns df_TX_pos_rate df_TX_pos_rate["Specimen Collection Date"] df_TX_pos_rate["Test Results"] time_series(df=df_deaths, states=["Texas"], counties="all").tail(9) df_TX_hospitalizations["Hospitalizations"] # with pd.option_context('display.max_rows', None, 'display.max_columns', 25): # print(df_confirmed.head())
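# For reference, a minimal sketch of how the two time-series helpers used throughout the plots
# above are assumed to behave. Their actual definitions appear earlier in the notebook; this is
# an illustration, not the original implementation.
import pandas as pd


def daily_change_in_time_series_sketch(ts: pd.Series) -> pd.Series:
    # Cumulative totals -> day-over-day new counts
    return ts.diff().fillna(0)


def trailing_moving_average_sketch(ts: pd.Series, window: int) -> pd.Series:
    # Trailing (right-aligned) moving average over `window` days
    return ts.rolling(window=window, min_periods=1).mean()


# Example: a 7-day trailing average of daily new confirmed cases in Texas
# tx_daily = daily_change_in_time_series_sketch(
#     time_series(df_confirmed, states=["Texas"], counties="all")
# )
# trailing_moving_average_sketch(tx_daily, 7).tail()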
<jupyter_start><jupyter_text>Logistic regression To predict heart disease **LOGISTIC REGRESSION - HEART DISEASE PREDICTION** **Introduction** World Health Organization has estimated 12 million deaths occur worldwide, every year due to Heart diseases. Half the deaths in the United States and other developed countries are due to cardio vascular diseases. The early prognosis of cardiovascular diseases can aid in making decisions on lifestyle changes in high risk patients and in turn reduce the complications. This research intends to pinpoint the most relevant/risk factors of heart disease as well as predict the overall risk using logistic regression Data Preparation Source The dataset is publically available on the Kaggle website, and it is from an ongoing cardiovascular study on residents of the town of Framingham, Massachusetts. The classification goal is to predict whether the patient has 10-year risk of future coronary heart disease (CHD).The dataset provides the patients’ information. It includes over 4,000 records and 15 attributes. Variables Each attribute is a potential risk factor. There are both demographic, behavioral and medical risk factors. Demographic: • Sex: male or female(Nominal) • Age: Age of the patient;(Continuous - Although the recorded ages have been truncated to whole numbers, the concept of age is continuous) Behavioral • Current Smoker: whether or not the patient is a current smoker (Nominal) • Cigs Per Day: the number of cigarettes that the person smoked on average in one day.(can be considered continuous as one can have any number of cigarettes, even half a cigarette.) Medical( history) • BP Meds: whether or not the patient was on blood pressure medication (Nominal) • Prevalent Stroke: whether or not the patient had previously had a stroke (Nominal) • Prevalent Hyp: whether or not the patient was hypertensive (Nominal) • Diabetes: whether or not the patient had diabetes (Nominal) Medical(current) • Tot Chol: total cholesterol level (Continuous) • Sys BP: systolic blood pressure (Continuous) • Dia BP: diastolic blood pressure (Continuous) • BMI: Body Mass Index (Continuous) • Heart Rate: heart rate (Continuous - In medical research, variables such as heart rate though in fact discrete, yet are considered continuous because of large number of possible values.) • Glucose: glucose level (Continuous) Predict variable (desired target) • 10 year risk of coronary heart disease CHD (binary: “1”, means “Yes”, “0” means “No”) Logistic Regression Logistic regression is a type of regression analysis in statistics used for prediction of outcome of a categorical dependent variable from a set of predictor or independent variables. In logistic regression the dependent variable is always binary. Logistic regression is mainly used to for prediction and also calculating the probability of success. The results above show some of the attributes with P value higher than the preferred alpha(5%) and thereby showing low statistically significant relationship with the probability of heart disease. Backward elimination approach is used here to remove those attributes with highest P-value one at a time followed by running the regression repeatedly until all attributes have P Values less than 0.05. 
Feature Selection: Backward elimination (P-value approach)

Logistic regression equation: P = e^(β0 + β1*X1) / (1 + e^(β0 + β1*X1))

When all features are plugged in: logit(p) = log(p/(1−p)) = β0 + β1*Sexmale + β2*age + β3*cigsPerDay + β4*totChol + β5*sysBP + β6*glucose

Interpreting the results: Odds Ratio, Confidence Intervals and P-values
• This fitted model shows that, holding all other features constant, the odds of getting diagnosed with heart disease for males (sex_male = 1) over that of females (sex_male = 0) is exp(0.5815) = 1.788687. In terms of percent change, the odds for males are 78.8% higher than the odds for females.
• The coefficient for age says that, holding all others constant, we will see a 7% increase in the odds of getting diagnosed with CHD for a one-year increase in age, since exp(0.0655) = 1.067644.
• Similarly, with every extra cigarette one smokes there is a 2% increase in the odds of CHD.
• For total cholesterol level and glucose level there is no significant change.
• There is a 1.7% increase in odds for every unit increase in systolic blood pressure.

Model Evaluation - Statistics
From the above statistics it is clear that the model is more specific than sensitive: the negative values are predicted more accurately than the positives. Predicted probabilities of 0 (No Coronary Heart Disease) and 1 (Coronary Heart Disease: Yes) are computed for the test data with a default classification threshold of 0.5.

Lower the threshold: since the model is predicting heart disease, too many type II errors are not advisable. A False Negative (ignoring the probability of disease when there actually is one) is more dangerous than a False Positive in this case. Hence, in order to increase the sensitivity, the threshold can be lowered.

Conclusions
• All attributes selected after the elimination process show P-values lower than 5%, thereby suggesting a significant role in heart disease prediction.
• Men seem to be more susceptible to heart disease than women. Increase in age, number of cigarettes smoked per day, and systolic blood pressure also show increasing odds of having heart disease.
• Total cholesterol shows no significant change in the odds of CHD. This could be due to the presence of 'good' cholesterol (HDL) in the total cholesterol reading. Glucose too causes a very negligible change in odds (0.2%).
• The model predicted with 0.88 accuracy. The model is more specific than sensitive. 
Overall model could be improved with more data Appendix http://www.who.int/mediacentre/factsheets/fs317/en/ Data Source References https://www.kaggle.com/amanajmera1/framingham-heart-study-dataset/data Kaggle dataset identifier: heart-disease-prediction-using-logistic-regression <jupyter_code>import pandas as pd df = pd.read_csv('heart-disease-prediction-using-logistic-regression/framingham.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 4238 entries, 0 to 4237 Data columns (total 16 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 male 4238 non-null int64 1 age 4238 non-null int64 2 education 4133 non-null float64 3 currentSmoker 4238 non-null int64 4 cigsPerDay 4209 non-null float64 5 BPMeds 4185 non-null float64 6 prevalentStroke 4238 non-null int64 7 prevalentHyp 4238 non-null int64 8 diabetes 4238 non-null int64 9 totChol 4188 non-null float64 10 sysBP 4238 non-null float64 11 diaBP 4238 non-null float64 12 BMI 4219 non-null float64 13 heartRate 4237 non-null float64 14 glucose 3850 non-null float64 15 TenYearCHD 4238 non-null int64 dtypes: float64(9), int64(7) memory usage: 529.9 KB <jupyter_text>Examples: { "male": 1.0, "age": 39.0, "education": 4.0, "currentSmoker": 0.0, "cigsPerDay": 0.0, "BPMeds": 0.0, "prevalentStroke": 0.0, "prevalentHyp": 0.0, "diabetes": 0.0, "totChol": 195.0, "sysBP": 106.0, "diaBP": 70.0, "BMI": 26.97, "heartRate": 80.0, "glucose": 77.0, "TenYearCHD": 0.0 } { "male": 0.0, "age": 46.0, "education": 2.0, "currentSmoker": 0.0, "cigsPerDay": 0.0, "BPMeds": 0.0, "prevalentStroke": 0.0, "prevalentHyp": 0.0, "diabetes": 0.0, "totChol": 250.0, "sysBP": 121.0, "diaBP": 81.0, "BMI": 28.73, "heartRate": 95.0, "glucose": 76.0, "TenYearCHD": 0.0 } { "male": 1.0, "age": 48.0, "education": 1.0, "currentSmoker": 1.0, "cigsPerDay": 20.0, "BPMeds": 0.0, "prevalentStroke": 0.0, "prevalentHyp": 0.0, "diabetes": 0.0, "totChol": 245.0, "sysBP": 127.5, "diaBP": 80.0, "BMI": 25.34, "heartRate": 75.0, "glucose": 70.0, "TenYearCHD": 0.0 } { "male": 0.0, "age": 61.0, "education": 3.0, "currentSmoker": 1.0, "cigsPerDay": 30.0, "BPMeds": 0.0, "prevalentStroke": 0.0, "prevalentHyp": 1.0, "diabetes": 0.0, "totChol": 225.0, "sysBP": 150.0, "diaBP": 95.0, "BMI": 28.58, "heartRate": 65.0, "glucose": 103.0, "TenYearCHD": 1.0 } <jupyter_script>import numpy as np import pandas as pd import matplotlib.pyplot as plt from tabulate import tabulate import missingno as msno import tabulate as tb import statsmodels.api as sm from statsmodels.formula.api import ols import scipy.stats as stats import statsmodels.stats.multicomp as multi from sklearn import preprocessing import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split import seaborn as sns data = pd.DataFrame( pd.read_csv( "../input/heart-disease-prediction-using-logistic-regression/framingham.csv" ) ) display(data) data.shape data.dtypes np.sum(data.isnull()) msno.matrix(data) # In the above graph, white lines represent missing values and their location. As can be seen above, the variable with the most missing values is glucose while 9 other variables don't have any such as gender, age, smoking status. In this project, missing values will be imputed by the appropriate method. 
# Descriptive statistics of numeric variables data[ ["age", "cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].dropna().describe() # The table of 10-year risk of coronary heart disease and gender shows the number of people in each group. By that, we can conclude that men in the data who have a 10-year risk of coronary heart disease are greater than women while for the other group that has no risk it is the opposite. # Imputed data data_wo_na = data.copy() data_wo_na["cigsPerDay"] = data_wo_na["cigsPerDay"].fillna( data_wo_na["cigsPerDay"].mode().iloc[0] ) data_wo_na["totChol"] = data_wo_na["totChol"].fillna(data_wo_na["totChol"].median()) data_wo_na["BMI"] = data_wo_na["BMI"].fillna(data_wo_na["BMI"].median()) data_wo_na["heartRate"] = data_wo_na["heartRate"].fillna( data_wo_na["heartRate"].median() ) data_wo_na["glucose"] = data_wo_na["glucose"].fillna(data_wo_na["glucose"].median()) data_wo_na["education"] = data_wo_na["education"].fillna( data_wo_na["education"].mode().iloc[0] ) data_wo_na["BPMeds"] = data_wo_na["BPMeds"].fillna(0) bins = [29, 39, 49, 59, 69, 79] labels = ["30-39", "40-49", "50-59", "60-69", "70-79"] data["agerange"] = pd.cut(data.age, bins, labels=labels, include_lowest=True) bins = [29, 39, 49, 59, 69, 79] labels = ["30-39", "40-49", "50-59", "60-69", "70-79"] data_wo_na["agerange"] = pd.cut(data.age, bins, labels=labels, include_lowest=True) data[ [ "male", "education", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "TenYearCHD", "education", ] ] = data[ [ "male", "education", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "TenYearCHD", "education", ] ].astype( "category" ) print(data.dtypes) data_wo_na[ [ "male", "education", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "TenYearCHD", "education", ] ] = data_wo_na[ [ "male", "education", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "TenYearCHD", "education", ] ].astype( "category" ) print(data_wo_na.dtypes) data.groupby("TenYearCHD").mean() # All numeric variables are higher in the group who have a 10-year risk of coronary heart disease. # Descriptive statistics after imputation data_wo_na[ ["age", "cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].describe() # **EDA-RESEARCH QUESTIONS** # *1) How does the 10-year risk of coronary heart disease change by other variables in the data?* palette = sns.color_palette("mako_r", 6) sns.catplot(x="TenYearCHD", kind="count", palette=palette, data=data) TenYearCHD_table = pd.crosstab(index=data["TenYearCHD"], columns="count") sns.heatmap(TenYearCHD_table, cmap=palette, annot=True, fmt="g") # As it can be seen in the above graph and the frequency table, data consist of 3594 people who don't have a 10-year risk of coronary heart disease and 644 people who have the risk. # GENDER x, y, hue = "TenYearCHD", "proportion", "male" hue_order = ["Female", "Male"] ( data[x] .groupby(data[hue]) .value_counts(normalize=True) .rename(y) .reset_index() .pipe((sns.barplot, "data"), x=x, y=y, hue=hue, palette=palette) ) # Most of the people who don't have a 10-year risk of coronary heart disease are female while the ones who have the risk are generally male. 
from scipy.stats import chi2_contingency c = pd.crosstab(index=data["male"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was rejected, that is, the 10-year risk of coronary heart disease and gender have a significant relation. # AGE age_CHD_table = pd.crosstab(index=data["agerange"], columns=data["TenYearCHD"]) sns.heatmap(age_CHD_table, cmap=palette, annot=True, fmt="g") x, y, hue = "TenYearCHD", "proportion", "agerange" hue_order = ["30-39", "40-49", "50-59", "60-69", "70-79"] ( data[x] .groupby(data[hue]) .value_counts(normalize=True) .rename(y) .reset_index() .pipe((sns.barplot, "data"), x=x, y=y, hue=hue, palette=palette) ) # In the above bar plots, the first one represents the frequency of 10-year risk of CHD in each age group while the second one shows the proportions. # By age frequency table and the first graph, it can be said that 40-49 age group is the most crowded one while age group 70-79 have only 2 people. # The age group and CHD table and the second graph show that age and the 10-year risk of coronary heart disease are directly proportional. In the age group 70-79, the risk increases to 50 percent while in the age group 30-39 it is less than 5 percent. # EDUCATION x, y, hue = "TenYearCHD", "proportion", "education" hue_order = [1, 2, 3, 4] ( data[x] .groupby(data[hue]) .value_counts(normalize=True) .rename(y) .reset_index() .pipe((sns.barplot, "data"), x=x, y=y, hue=hue, palette=palette) ) # The proportion of the 10-year risk of coronary heart disease by education graph shows the percentages of 10-year risk of coronary heart disease in each education group. There is no huge difference between those percentages but the greatest risk is in education group 1 while the lowest is in group 2. c = pd.crosstab(index=data["education"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was rejected, that is, the 10-year risk of coronary heart disease and education have a significant relation. # PREVALENT STROKE c = pd.crosstab(index=data["prevalentStroke"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") # The data consist of only 25 people who had a stroke before and 44 percent of them have a 10-year risk of coronary heart disease. This seems like a big percentage but since the sample is very small, it did not give a considerable meaning apart from this data. stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was rejected, that is, the 10-year risk of coronary heart disease and prevalent stroke have a significant relation. 
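# Optional refactor (illustrative, not in the original notebook): the crosstab + chi-square pattern
# above is repeated for several categorical variables below, so it can be wrapped in a small helper.
# The function name is hypothetical.
import pandas as pd
from scipy.stats import chi2_contingency


def chi2_vs_target(df, col, target="TenYearCHD", alpha=0.05):
    table = pd.crosstab(index=df[col], columns=df[target])
    stat, p, dof, expected = chi2_contingency(table)
    verdict = "Dependent (reject H0)" if p <= alpha else "Independent (H0 holds true)"
    print("{} vs {}: chi2={:.2f}, p={:.4g} -> {}".format(col, target, stat, p, verdict))
    return table, p


# Example usage for the variables examined in this section:
# for col in ["male", "education", "prevalentStroke", "prevalentHyp", "diabetes", "currentSmoker"]:
#     chi2_vs_target(data, col)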
# PREVALENT HYPERTENSION c = pd.crosstab(index=data["prevalentHyp"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was rejected, that is, the 10-year risk of coronary heart disease and prevalent hypertension have a significant relation. # DIABETES c = pd.crosstab(index=data["diabetes"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") # The table above shows the frequencies of people who have diabetes or not by the 10-year risk of coronary heart disease. And by that, it can be concluded that there is a huge difference in percentages for people who have diabetes. For people who don't have diabetes, the risk is 14.6 percent while for the other group the risk is 63.3 percent. stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was rejected, that is, the 10-year risk of coronary heart disease and diabetes have a significant relation. # SMOKING STATUS sns.catplot( x="currentSmoker", hue="TenYearCHD", kind="count", palette=palette, data=data ) # The above table shows the change in 10-year risk of coronary heart disease by smoking status. It can be said that smoking status has an inconsiderable effect on the risk in the data. c = pd.crosstab(index=data["currentSmoker"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was accepted, that is, the 10-year risk of coronary heart disease and smoking status does not have a significant relation. # BODY MASS INDEX ager_10ychd = pd.crosstab( index=data["agerange"], columns=data["TenYearCHD"], values=data["BMI"], aggfunc=np.mean, ).round(0) sns.heatmap(ager_10ychd, cmap=palette, annot=True, fmt="g") sns.catplot( x="TenYearCHD", y="BMI", hue="agerange", kind="box", data=data, palette=palette ) # The body mass index box plot shows the distributions for the 10-year risk of coronary heart disease by age group. # In the age group 30-39; There is not much difference in the median but for the risk group, it is slightly higher. Minimum, maximum values, and the first quartile are lower in the risk group but the third quartile is higher. # In the age group 40-49; The distribution for the risk group is almost the same as the group of people who don't have the risk. # In the age group 50-59; The distribution for the risk group is almost the same as the group of people who don't have the risk but the first and third quartile is slightly higher in the risk group. # In the age group 60-69; There is not much difference in the median and minimum value. Maximum values and the first quartile are lower in the risk group but the third quartile is higher. 
# GLUCOSE, TOTAL CHOLESTEROL, SYSTOLIC BLOOD PRESSURE, DIASTOLIC BLOOD PRESSURE, HEART RATE import matplotlib.pyplot as plt import seaborn as sns fig, axes = plt.subplots(2, 3, figsize=(18, 10)) sns.boxplot( ax=axes[0, 0], data=data_wo_na, y="glucose", x="TenYearCHD", palette=palette ) sns.boxplot( ax=axes[0, 1], data=data_wo_na, y="totChol", x="TenYearCHD", palette=palette ) sns.boxplot(ax=axes[1, 0], data=data_wo_na, y="sysBP", x="TenYearCHD", palette=palette) sns.boxplot(ax=axes[1, 1], data=data_wo_na, y="diaBP", x="TenYearCHD", palette=palette) sns.boxplot( ax=axes[0, 2], data=data_wo_na, y="heartRate", x="TenYearCHD", palette=palette ) # The above box plots show the distributions in each variable by the 10-year risk of coronary heart disease. # **Glucose-10 Year Risk of Coronary Heart Disease:** The distributions for both the risk group and the group of people who don't have the risk are almost the same except the third quartile which is greater and the maximum value which is slightly greater for the risk group. Both groups have so many outliers. # **Total Cholesterol-10 Year Risk of Coronary Heart Disease:** The distributions for both the risk group and the group of people who don't have the risk are almost the same. Both groups have so many outliers. # **Heart Rate-10 Year Risk of Coronary Heart Disease:** The distributions for both the risk group and the group of people who don't have the risk are almost the same except for the third quartile, minimum and maximum values which are slightly greater for the risk group. Both groups have so many outliers especially the group of people who don't have the risk. # **Systolic Blood Pressure-10 Year Risk of Coronary Heart Disease:** In the group that doesn't have a 10-year risk of coronary heart disease, the median is about 130 while in the other group that has a 10-year risk of coronary heart disease it is almost 150. The minimum systolic blood pressure value for both two groups are the same while the maximum value is much higher in the risk group. Also, the first and third quartiles are so much higher in the risk group. # **Diastolic Blood Pressure-10 Year Risk of Coronary Heart Disease:** Like systolic blood pressure, diastolic blood pressure's median, max, first quartile, and third quartile values are higher for the risk group. # 2) Does smoking status and the number of cigarettes smoked in a day affect heart rate and systolic blood pressure? data["cigsPerDay"].value_counts() bins = [0, 1, 3, 7, 11, 15, 19, 23, 27, 30, 39, 49, 80] labels = [ "0", "1-3", "4-7", "8-11", "12-15", "16-19", "20-23", "24-27", "27-30", "31-39", "40-49", "50+", ] data["cigrange"] = pd.cut(data.cigsPerDay, bins, labels=labels, include_lowest=True) data[["cigrange", "cigsPerDay"]] data_wo_na["cigsPerDay"].value_counts() bins = [0, 1, 3, 7, 11, 15, 19, 23, 27, 30, 39, 49, 80] labels = [ "0", "1-3", "4-7", "8-11", "12-15", "16-19", "20-23", "24-27", "27-30", "31-39", "40-49", "50+", ] data_wo_na["cigrange1"] = pd.cut( data_wo_na.cigsPerDay, bins, labels=labels, include_lowest=True ) data_wo_na[["cigrange1", "cigsPerDay"]] # Groups that have the range of cigarettes smoked in a day were created to see the results in graphs without a mess. 
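# Illustrative sanity check (not in the original notebook): print the exact category labels produced
# by pd.cut, so that any later string filters (e.g. data_wo_na["cigrange1"] == "20-23") can be matched
# against them character for character.
print(list(data_wo_na["cigrange1"].cat.categories))
print(data_wo_na["cigrange1"].value_counts(dropna=False).sort_index())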
# HEART RATE cgr_10ychd = pd.crosstab( index=data_wo_na["cigrange1"], columns=data_wo_na["agerange"], values=data_wo_na["heartRate"], aggfunc=np.mean, ).round(0) sns.heatmap(cgr_10ychd, cmap=palette, annot=True, fmt="g") sns.catplot( x="cigrange1", y="heartRate", aspect=1.5, kind="box", data=data_wo_na, palette="viridis", ) # The above box plot shows the distribution of heart rate and the range of cigarettes smoked in a day. Except for the 31-39 and 50+ groups, every group has almost the same median heart rate. The comments for each group were written as a comparison with the previous one. # * For the group who are non-smokers, the box plot shows a symmetric distribution. Also, this group has many outliers and this can be because of other variables such as age. Without outliers, the minimum value for this group is almost 45 which is very low even in resting. The maximum value is near 105 and the median is near 75. # * For the group 1-3, while the minimum heart rate increased, the maximum heart rate decreased. There is a left-skewed distribution in this group and this means that there is an agglomeration in between the median and the third quartile. So, we can say that in this group there are more people who have heartrate above the median than who have heartrate below the median. # * For the group 4-7, minimum and maximum values for heart rate are almost the same as the 1-3 group but the first and third quartiles are higher. This group also has an almost symmetric distribution. # * For the group 8-11, while the minimum value is lower the maximum value is higher than the previous group. The first and third quartiles are almost the same except the third quartile is a little lower in the 8-11. This group also looks symmetric. # * For the group 12-15 there is an increase in the minimum value but the maximum value remains the same as the previous group. The first and third quartiles are higher and there is a right-skewed distribution in this group. This means there is an agglomeration between the first quartile and the median. # * For the groups 16-19 the range of the minimum and maximum is getting the smallest. Also, the interquartile range is the smallest too. The distribution looks symmetric and there are only 2 outliers that are lower than the minimum value and very higher than the maximum. This can be a cause of other factors such as age. # * For the group 20-23 the distribution and descriptive statistics are almost equal to the group 12-15. But there are so many outliers in this case that are higher than the maximum value. # * For the group 24-27 the minimum value is much higher while the maximum is much lower than the previous one. The first quartile is almost the same but the third quartile is a little higher. There is a right-skewed distribution in this group which means there is an agglomeration in between the first quartile and the median. # * For the group 27-30 the minimum value is much lower while the maximum is much higher than the previous one. While the third quartile is almost the same, the first quartile is a little lower. There is also right-skewed distribution but not so obvious like the previous group. # * For the group 31-39 the median is lower than previous ones and there is a really obvious decrease in the maximum value which is confusing because it is expected that the number of cigarettes smoked and the heart rate are directly proportional. It can be because of other variables. The minimum value remains the same. There is a right-skewed distribution as well. 
# * For the group 40-49 there is almost the same distribution as the 27-30 except the first quartile and the minimum value which are a little higher. # * For the 50+ group the median and ranges between maximum and minimum values getting smaller. There is an obvious right-skewed distribution which means there is an agglomeration in between the first quartile and the median. plt.hist(data_wo_na["heartRate"]) stats.shapiro(data_wo_na["heartRate"]) # By the above histogram, it can be seen that heart rate is not normal. So the assumption of ANOVA, normality was not provided. Because of this, ANOVA can not be performed. Instead of ANOVA, Non-parametric Kruskal was used to see the differences between median systolic blood pressure in each age group. data_wo_na["cigrange1"].value_counts() data_wo_na["cigrange1"].value_counts() cr0 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "0"] cr1 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "1-3"] cr2 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "4-7"] cr3 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "8-11"] cr4 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "12-15"] cr5 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "16-19"] cr6 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "20-23"] cr7 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "24-27"] cr8 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "28-31"] cr9 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "31-39"] cr10 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "40-49"] cr11 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "50+"] cr0 = cr0.append(cr1) cr2 = cr2.append(cr3) cr4 = cr4.append(cr5) cr6 = cr6.append(cr8) cr8 = cr8.append(cr10) cr10 = cr10.append(cr11) # perform Kruskal-Wallis Test stats.kruskal(cr0, cr2, cr4, cr6, cr8, cr10) # Since the p-value of the Kruskal Wallis H Test is smaller than 0.05, the null hypothesis can be rejected. This means the median heart rate differ by the number of cigarettes smoked in a day. # SYSTOLIC BLOOD PRESSURE cgr_10ychdsys = pd.crosstab( index=data_wo_na["cigrange1"], columns=data_wo_na["agerange"], values=data_wo_na["sysBP"], aggfunc=np.mean, ).round(0) sns.heatmap(cgr_10ychdsys, cmap=palette, annot=True, fmt="g") sns.catplot( x="cigrange", y="sysBP", aspect=1.5, kind="box", data=data, palette="viridis" ) # The above box plot shows the distribution of systolic blood pressure and the range of cigarettes smoked in a day. The comments for each group were written as a comparison with the previous one. # * For the group who are non-smokers, the box plot shows a symmetric distribution. This group has many outliers and this can be because of other variables such as age. Without outliers, the minimum value for this group is almost 75. The maximum value is near 185 and the median is near 130. # * For the group 1-3, while the minimum systolic blood pressure(near 100) increased, the maximum systolic blood pressure(near 180) and the median(near 130) decreased. The distribution looks symmetric. # * For the group 4-7, minimum, maximum, first quartile, and third quartile are all lower than the previous group. This group also has an almost symmetric distribution. # * For the group 8-11, the minimum value and the maximum values are lower than the previous group. The first and third quartiles are almost the same. There is a right-skewed distribution which means there is an agglomeration between the first quartile and the median. 
# * For the group 12-15 there is a decrease in the minimum and the maximum values. The first and third quartiles are almost the same and there is an almost symmetric distribution in this group. # * For the groups 16-19 the minimum, maximum, first, and third quartiles and also median are higher. There is a right-skewed distribution. # * For the group 20-23 all descriptive statistics are lower than the previous group. # * For the group 24-27 the minimum value is much higher while the maximum is much lower than the previous one. The first quartile is almost the same but the third quartile is a little lower. There is a right-skewed distribution in this group which means there is an agglomeration in between the first quartile and the median. # * For the group 27-30 the minimum value is much lower while the maximum is much higher than the previous one. While the first quartile is almost the same, the first quartile is a little higher. There is an almost symmetric distribution. # * For the group 31-39 all descriptive statistics are higher except the first quartile and the distribution looks symmetric. # * For the group 40-49 all descriptive statistics are lower except the first quartile and the distribution looks symmetric. # * For the 50+ group the median and ranges between maximum and minimum values getting smaller. There is an obvious right-skewed distribution which means there is an agglomeration in between the first quartile and the median. plt.hist(data_wo_na["sysBP"]) stats.shapiro(data_wo_na["sysBP"]) # By the above histogram, it can be seen that systolic blood pressure is not normal. So the assumption of ANOVA, normality was not provided. Because of this, ANOVA can not be performed. Instead of ANOVA, Non-parametric Kruskal was used to see the differences between median systolic blood pressure in each age group. data_wo_na["cigrange1"].value_counts() cr0 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "0"] cr1 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "1-3"] cr2 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "4-7"] cr3 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "8-11"] cr4 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "12-15"] cr5 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "16-19"] cr6 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "20-23"] cr7 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "24-27"] cr8 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "28-31"] cr9 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "31-39"] cr10 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "40-49"] cr11 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "50+"] cr0 = cr0.append(cr1) cr2 = cr2.append(cr3) cr4 = cr4.append(cr5) cr6 = cr6.append(cr8) cr8 = cr8.append(cr10) cr10 = cr10.append(cr11) # perform Kruskal-Wallis Test stats.kruskal(cr0, cr2, cr4, cr6, cr8, cr10) # Since the p-value of the Kruskal Wallis H Test is smaller than 0.05, the null hypothesis can be rejected. This means the median systolic blood pressure differs by the number of cigarettes smoked in a day. # 3) Does smoking status and the number of cigarettes smoked change by gender, age and education? x, y, hue = "currentSmoker", "proportion", "male" hue_order = ["Male", "Female"] ( data[x] .groupby(data[hue]) .value_counts(normalize=True) .rename(y) .reset_index() .pipe((sns.barplot, "data"), x=x, y=y, hue=hue, palette=palette) ) # The above graph shows the proportion of people in the status smokers and non-smokers while the colors show the gender in each group. 
In this data, almost 60 percent of people who are smokers are female while more than 60 percent of people who are not smokers are male. x, y, hue = "currentSmoker", "proportion", "agerange" hue_order = ["30-39", "40-49", "50-59", "60-69", "70-79"] ( data[x] .groupby(data[hue]) .value_counts(normalize=True) .rename(y) .reset_index() .pipe((sns.barplot, "data"), x=x, y=y, hue=hue, palette=palette) ) # Similar to the previous graph, the above graph shows the proportion for smoking status. But in this case, it shows the proportion of age groups instead of gender. In the x axis, 0 represents non-smokers while 1 represents smokers. # All people in the age group 70-79 are smokers by this graph. But it is not correct to conclude with this result since there are only 2 people in that group in the data. # In the age groups 60-69 and 50-59, the majority of people are smokers while for the age groups 40-49 and 30-39 it is the opposite. But generally, the highest percentage of people who are smokers are in the 60-69 age group if 70-79 will not be included. sns.catplot( x="cigrange", kind="count", hue="male", aspect=1.5, data=data, palette=palette ) # The range of cigarettes smoked in a day and gender graph shows the following conclusions; # * Women in the data who don't smoke are more than 2 times of men who are non-smokers. Also, it can be said that the people who don't smoke are the majority. # * In the range of 1-15 cigarettes smoked in a day, the majority are women. # * In the range of 16-19 and 31-39 cigarettes smoked in a day, there is not any female. # * In the range of 20-40+ cigarettes smoked in a day, the majority are men. # A simple conclusion can be made by looking at these results. And this is that men tend to smoke more cigarettes than women in this data. # EDUCATION from matplotlib import cm # Prepare Data df = data.groupby("education").size() # Make the plot with pandas df.plot(kind="pie", subplots=True, figsize=(8, 8), cmap="crest", autopct="%1.1f%%") plt.title("Pie Chart of Education") plt.ylabel("") plt.show() sns.catplot( x="education", y="cigsPerDay", kind="bar", aspect=1.5, data=data, palette=palette ) # Actually, it is expected that while education levels go higher the number of cigarettes smoked per day will decrease. But above bar plot shows that there is not a relationship like that in this data. The education level that has the most cigarettes in a day is level 2 while the second one is level 4 and the least is level 3. sns.catplot( x="education", hue="currentSmoker", kind="count", aspect=1.5, data=data, palette=palette, ) # The above plot shows the frequencies of smoking status in each education level. The highest percentage of smokers are in the education level 2 while the second one is in the level 4 and the least one is in the level 3. # 4) Does age affect other variables? # HEART RATE sns.catplot( x="agerange", y="heartRate", aspect=1.5, kind="box", data=data_wo_na, palette="viridis", ) # The medians of heart rate are almost the same for all age groups except 70-79. In each group, the distribution looks symmetric. The range for the group 70-79 is the smallest but since there are only 2 people in that group, it doesn't mean anything concrete. # It is known that the heart rate is not normal. So the assumption of ANOVA, normality was not provided. Because of this, ANOVA can not be performed. Instead of ANOVA, Non-parametric Kruskal was used to see the differences between median heart rates in each age group. 
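# Illustrative addition (not in the original notebook): besides normality, ANOVA also assumes roughly
# equal variances across groups; Levene's test is a common way to check that second assumption.
import scipy.stats as stats

hr_groups = [
    data_wo_na.loc[data_wo_na["agerange"] == g, "heartRate"]
    for g in ["30-39", "40-49", "50-59", "60-69", "70-79"]
]
lev_stat, lev_p = stats.levene(*hr_groups)
print(
    "Levene's test for heartRate across age groups: stat={:.3f}, p={:.4g}".format(
        lev_stat, lev_p
    )
)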
data_wo_na["agerange"].value_counts() ar3 = data_wo_na["heartRate"][data_wo_na["agerange"] == "30-39"] ar4 = data_wo_na["heartRate"][data_wo_na["agerange"] == "40-49"] ar5 = data_wo_na["heartRate"][data_wo_na["agerange"] == "50-59"] ar6 = data_wo_na["heartRate"][data_wo_na["agerange"] == "60-69"] ar7 = data_wo_na["heartRate"][data_wo_na["agerange"] == "70-79"] # perform Kruskal-Wallis Test stats.kruskal(ar3, ar4, ar5, ar6, ar7) # Since the p-value of the Kruskal Wallis H Test is greater than 0.05, the null hypothesis cannot be rejected. This means heart rates don't change by age groups. # SYSTOLIC BLOOD PRESSURE sns.catplot( x="agerange", y="sysBP", aspect=1.5, kind="box", data=data_wo_na, palette="viridis" ) # Median of systolic blood pressure, interquartile ranges, minimum and maximum values are getting higher until the group 70-79 except for the 50-59's min value. The distributions of all groups look symmetric. data_wo_na["agerange"].value_counts() ar3 = data_wo_na["sysBP"][data_wo_na["agerange"] == "30-39"] ar4 = data_wo_na["sysBP"][data_wo_na["agerange"] == "40-49"] ar5 = data_wo_na["sysBP"][data_wo_na["agerange"] == "50-59"] ar6 = data_wo_na["sysBP"][data_wo_na["agerange"] == "60-69"] ar7 = data_wo_na["sysBP"][data_wo_na["agerange"] == "70-79"] # perform Kruskal-Wallis Test stats.kruskal(ar3, ar4, ar5, ar6, ar7) # Since the p-value of the Kruskal Wallis H Test is smaller than 0.05, the null hypothesis can be rejected. This means systolic blood pressure differ by age groups. sns.catplot( x="agerange", y="glucose", aspect=1.5, kind="box", data=data_wo_na, palette="viridis", ) # Almost all desciptives are equal for each group. plt.hist(data_wo_na["glucose"]) # By the above histogram, it can be seen that glucose is not normal. So the assumption of ANOVA, normality was not provided. Because of this, ANOVA can not be performed. Instead of ANOVA, Non-parametric Kruskal was used to see the differences between median glucose in each age group. data_wo_na["agerange"].value_counts() ar3 = data_wo_na["glucose"][data_wo_na["agerange"] == "30-39"] ar4 = data_wo_na["glucose"][data_wo_na["agerange"] == "40-49"] ar5 = data_wo_na["glucose"][data_wo_na["agerange"] == "50-59"] ar6 = data_wo_na["glucose"][data_wo_na["agerange"] == "60-69"] ar7 = data_wo_na["glucose"][data_wo_na["agerange"] == "70-79"] # perform Kruskal-Wallis Test stats.kruskal(ar3, ar4, ar5, ar6, ar7) # Since the p-value of the Kruskal Wallis H Test is smaller than 0.05, the null hypothesis can be rejected. This means glucose differ by age groups. 
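# Illustrative follow-up (not in the original notebook): a significant Kruskal-Wallis result says that
# at least one group differs, but not which. Pairwise Mann-Whitney U tests with a Bonferroni correction
# are one simple post-hoc option.
from itertools import combinations
import scipy.stats as stats

age_groups = ["30-39", "40-49", "50-59", "60-69", "70-79"]
pairs = list(combinations(age_groups, 2))
alpha_adj = 0.05 / len(pairs)  # Bonferroni-adjusted significance threshold

for g1, g2 in pairs:
    s1 = data_wo_na.loc[data_wo_na["agerange"] == g1, "glucose"]
    s2 = data_wo_na.loc[data_wo_na["agerange"] == g2, "glucose"]
    u_stat, p = stats.mannwhitneyu(s1, s2, alternative="two-sided")
    flag = "significant" if p < alpha_adj else "not significant"
    print(
        "{} vs {}: p={:.4g} ({} at adjusted alpha={:.4f})".format(
            g1, g2, p, flag, alpha_adj
        )
    )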
# **PRE-PROCESSING** plt.rc("font", size=14) sns.set(style="white") sns.set(style="whitegrid", color_codes=True) data_wo_na = data_wo_na.rename(columns={"TenYearCHD": "y"}) data_wo_na = data_wo_na.drop(columns=["agerange", "cigrange1"]) education = pd.get_dummies(data_wo_na["education"]) education.columns = ["education_1", "education_2", "education_3", "education_4"] education data_wo_na = pd.concat([data_wo_na, education], axis=1) data_wo_na = data_wo_na.drop(columns="education") data_wo_na # OUTLIER DETECTION datanum = data_wo_na[ ["cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].copy() datacat = data_wo_na.drop( columns=["cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ).copy() Q1 = datanum.quantile(0.25) Q3 = datanum.quantile(0.75) IQR = Q3 - Q1 print(IQR) datanum = datanum[ ~((datanum < (Q1 - 1.5 * IQR)) | (datanum > (Q3 + 1.5 * IQR))).any(axis=1) ] datanum.shape datanumcat = pd.concat([datacat, datanum], axis=1) datanumcat = datanumcat.dropna() datanumcat[ ["age", "cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].describe() # Descriptive statistics after deleting outliers # MULTICOLLINEARITY CHECK # Correlation plt.figure(figsize=(6, 5), dpi=80) sns.heatmap( datanumcat[["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]].corr(), xticklabels=datanumcat[["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]] .corr() .columns, yticklabels=datanumcat[["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]] .corr() .columns, cmap="viridis", center=0, annot=True, ) # Decorations plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.show() # There is a strong correlation between systolic and diastolic blood pressures. But all other variables have weak correlations between each other. # Import library for VIF from statsmodels.stats.outliers_influence import variance_inflation_factor def calc_vif(X): # Calculating VIF vif = pd.DataFrame() vif["variables"] = X.columns vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])] return vif vif_data = datanumcat[ ["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].copy() # Burada standardized etmeden önceki VIF valueları da göstermek lazım calc_vif(vif_data) # In order to see the multicollinearity between variables, their VIF values were checked. They should be less than 5 or 10. In this analysis, the threshold of VIF values was decided as 10. Since VIF values are much higher in the above table, scaling or another method has to be applied. 
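# Illustrative alternative (not part of the original notebook): VIF is usually computed on a design
# matrix that includes an intercept; omitting the constant tends to inflate the reported values.
# statsmodels' add_constant makes the intercept explicit. The function name is hypothetical.
import pandas as pd
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor


def calc_vif_with_const(X):
    Xc = sm.add_constant(X)
    vif = pd.DataFrame(
        {
            "variables": Xc.columns,
            "VIF": [variance_inflation_factor(Xc.values, i) for i in range(Xc.shape[1])],
        }
    )
    return vif[vif["variables"] != "const"]  # drop the intercept row from the report


# Example usage on the same numeric columns checked above:
# calc_vif_with_const(datanumcat[["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]])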
# SCALING from numpy import asarray from sklearn.preprocessing import MinMaxScaler # define min max scaler scaler = MinMaxScaler() # transform data datanum2 = datanumcat[ ["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].copy() datanum2 = pd.DataFrame(scaler.fit_transform(datanum2)) print(datanum2) datanum2.columns = ["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] datacat2 = datanumcat.drop( columns=["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ).copy() data_log = pd.concat([datanum2, datacat2], axis=1) data_log = data_log.dropna() vif_data = data_log[ ["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].copy() # Burada standardized etmeden önceki VIF valueları da göstermek lazım calc_vif(vif_data) # After deleting diaBP vif_data = data_log[ ["totChol", "sysBP", "BMI", "heartRate", "glucose"] ].copy() # Burada standardized etmeden önceki VIF valueları da göstermek lazım calc_vif(vif_data) # After scaling and elimination, VIF values became less than 10. So, there is no multicollinearity between variables now. data_log = data_log.drop(columns="diaBP", axis=1) data_log data_final = data_log.copy() data_final.columns.values X = data_final.loc[:, data_final.columns != "y"] y = data_final.loc[:, data_final.columns == "y"] # OVERSAMPLING FOR IMBALANCED DATA from collections import Counter from imblearn.over_sampling import RandomOverSampler ros = RandomOverSampler(sampling_strategy=1, random_state=42) X, y = ros.fit_resample(X, y) print("Resampled dataset shape %s" % Counter(y)) # **NAIVE BAYES** from sklearn.naive_bayes import GaussianNB from sklearn.metrics import accuracy_score model = GaussianNB() model.fit(X, y) X = data_log.drop("y", axis=1).copy() y = data_log["y"].copy() X, y = ros.fit_resample(X, y) X = pd.DataFrame(X) X.columns = [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42, stratify=y ) # param_grid_nb = { # 'var_smoothing': np.logspace(0,-9, num=100) # } # grid_search= GridSearchCV(GaussianNB(), param_grid_nb,cv=7) # grid_search.fit(X_train,y_train) # grid_search.best_params_ modelnb = GaussianNB(var_smoothing=0.0005336699231206307) modelnb.fit(X_train, y_train) y2_modelnb = modelnb.predict(X_test) accuracy_score(y_test, y2_modelnb) from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score cf = confusion_matrix(y_test, y2_modelnb) print(classification_report(y_test, y2_modelnb)) group_names = ["True Negative", "False Positive", "False Negative", "True Positive"] group_counts = ["{0:0.0f}".format(value) for value in cf.flatten()] group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)] labels = [ f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in 
zip(group_names, group_counts, group_percentages) ] labels = np.asarray(labels).reshape(2, 2) sns.heatmap(cf, annot=labels, fmt="", cmap=palette) perf_nb = pd.DataFrame( { "Train_Score": modelnb.score(X_train, y_train), "Test_Score": modelnb.score(X_test, y_test), "Precision_Score": precision_score(y_test, y2_modelnb), "Recall_Score": recall_score(y_test, y2_modelnb), "F1_Score": f1_score(y_test, y2_modelnb), }, index=["Naives Bayes"], ) # **KNN** from sklearn.neighbors import KNeighborsClassifier X = data_log.drop("y", axis=1).copy() y = data_log["y"].copy() X, y = ros.fit_resample(X, y) X = pd.DataFrame(X) X.columns = [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42, stratify=y ) # from sklearn.model_selection import GridSearchCV # gridSearchParameters = {'n_neighbors' : [i for i in range(3,10,2)], # 'weights' : ['uniform', 'distance'], # 'metric' : ['euclidean','manhattan','minkowski','hamming'] # } # grid = GridSearchCV(KNeighborsClassifier(), gridSearchParameters, cv=7) # grid.fit(X_train,y_train) # grid.best_params_ modelknn = KNeighborsClassifier(metric="hamming", n_neighbors=3, weights="distance") modelknn.fit(X_train, y_train) y2_modelknn = modelknn.predict(X_test) accuracy_score(y_test, y2_modelknn) cf = confusion_matrix(y_test, y2_modelknn) print(classification_report(y_test, y2_modelknn)) group_names = ["True Negative", "False Positive", "False Negative", "True Positive"] group_counts = ["{0:0.0f}".format(value) for value in cf.flatten()] group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)] labels = [ f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names, group_counts, group_percentages) ] labels = np.asarray(labels).reshape(2, 2) sns.heatmap(cf, annot=labels, fmt="", cmap=palette) perf_knn = pd.DataFrame( { "Train_Score": modelknn.score(X_train, y_train), "Test_Score": modelknn.score(X_test, y_test), "Precision_Score": precision_score(y_test, y2_modelknn), "Recall_Score": recall_score(y_test, y2_modelknn), "F1_Score": f1_score(y_test, y2_modelknn), }, index=["KNN"], ) # **LOGISTIC REGRESSION** data_log["y"].value_counts() count_no_risk = len(data_log[data_log["y"] == 0]) count_risk = len(data_log[data_log["y"] == 1]) pct_of_no_risk = count_no_risk / (count_no_risk + count_risk) print("percentage of no risk", pct_of_no_risk * 100) pct_of_risk = count_risk / (count_no_risk + count_risk) print("percentage of risk", pct_of_risk * 100) data_log data_final = data_log.copy() data_final.columns.values X = data_final.loc[:, data_final.columns != "y"] y = data_final.loc[:, data_final.columns == "y"] # OVERSAMPLING FOR IMBALANCED DATA from collections import Counter from imblearn.over_sampling import RandomOverSampler ros = RandomOverSampler(sampling_strategy=1, random_state=42) X, y = ros.fit_resample(X, y) print("Resampled dataset shape 
%s" % Counter(y)) X = pd.DataFrame( X, columns=[ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ], ) X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42, stratify=y ) pd.DataFrame(y).value_counts() X_for_logistic = data_final.loc[:, data_final.columns != "y"] y_for_logistic = data_final.loc[:, data_final.columns == "y"] X_for_logistic, y_for_logistic = ros.fit_resample(X_for_logistic, y_for_logistic) X_for_logistic = pd.DataFrame(X_for_logistic) X_for_logistic.columns = [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] X_for_logistic[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X_for_logistic[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train_lg, X_test_lg, y_train_lg, y_test_lg = train_test_split( X_for_logistic, y_for_logistic, test_size=0.25, random_state=42, stratify=y_for_logistic, ) columns = X_train.columns os_data_X = pd.DataFrame(X_train, columns=columns) os_data_y = pd.DataFrame(y_train) os_data_y.columns = ["y"] os_data_y.value_counts() print("length of oversampled data is ", len(os_data_X)) print( "Number of no subscription in oversampled data", len(os_data_y[os_data_y["y"] == 0]) ) print("Number of subscription", len(os_data_y[os_data_y["y"] == 1])) print( "Proportion of no subscription data in oversampled data is ", len(os_data_y[os_data_y["y"] == 0]) / len(os_data_X), ) print( "Proportion of subscription data in oversampled data is ", len(os_data_y[os_data_y["y"] == 1]) / len(os_data_X), ) # from sklearn.model_selection import GridSearchCV # grid= dict(solver = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'], # C= [0.001,0.01,0.1,1,10,100,1000], # penalty= ['none', 'l1', 'l2', 'elasticnet'])# l1 lasso l2 ridge # logreg=LogisticRegression() # logreg_cv=GridSearchCV(logreg,grid,cv=10) # logreg_cv.fit(X_train_lg,y_train_lg) # print("tuned hpyerparameters :(best parameters) ",logreg_cv.best_params_) # print("accuracy :",logreg_cv.best_score_) # By the GridSearchCV method, the best hyperparameters were found and they were used in the logistic model. 
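# A minimal, self-contained sketch of the kind of grid search that is commented out
# above, run on synthetic data so it stays quick. The grid values and the use of the
# saga solver (required for the l1 penalty) are illustrative choices, not the exact
# settings of the original run.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

X_demo, y_demo = make_classification(
    n_samples=500, n_features=10, weights=[0.85, 0.15], random_state=42
)
demo_grid = {
    "C": [0.01, 0.1, 1, 10],
    "penalty": ["l1", "l2"],
}
demo_search = GridSearchCV(
    LogisticRegression(solver="saga", max_iter=5000),
    demo_grid,
    cv=5,
    scoring="f1",  # F1 is a reasonable choice for an imbalanced target
)
demo_search.fit(X_demo, y_demo)
print(demo_search.best_params_, demo_search.best_score_)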
data_final_vars = data_final.columns.values.tolist() y = ["y"] X = [i for i in data_final_vars if i not in y] from sklearn.feature_selection import RFE from sklearn.linear_model import LogisticRegression logreg = LogisticRegression(C=0.1, penalty="l1", solver="saga") rfe = RFE(logreg, 20) rfe = rfe.fit(os_data_X, os_data_y.values.ravel()) print(rfe.support_) print(rfe.ranking_) X = os_data_X y = os_data_y["y"] import statsmodels.api as sm logit_model = sm.Logit(np.asarray(y), X.astype(float)) result = logit_model.fit() print(result.summary2()) # Since there are variables that have p-values greater than 0.05, there should be an elimination among independent variables. X = os_data_X.drop( columns=[ "BPMeds", "diabetes", "sysBP", "totChol", "currentSmoker", "prevalentStroke", "heartRate", "glucose", ], axis=1, ).copy() y = os_data_y["y"] logit_model = sm.Logit(np.asarray(y), X.astype(float)) result = logit_model.fit() print(result.summary2()) from sklearn.linear_model import LogisticRegression from sklearn import metrics X_train, X_test, y_train, y_test = train_test_split( X_for_logistic, y_for_logistic, test_size=0.25, random_state=42, stratify=y_for_logistic, ) logreg = LogisticRegression(C=0.1, penalty="l1", solver="saga") logreg.fit(X_train, y_train) y_pred = logreg.predict(X_test) from sklearn.metrics import confusion_matrix cf = confusion_matrix(y_test, y_pred) print(cf) from sklearn.metrics import classification_report l_cr = classification_report(y_test, y_pred, output_dict=True) print(classification_report(y_test, y_pred)) perf_lr = pd.DataFrame( { "Train_Score": logreg.score(X_train, y_train), "Test_Score": logreg.score(X_test, y_test), "Precision_Score": precision_score(y_test, y_pred), "Recall_Score": recall_score(y_test, y_pred), "F1_Score": f1_score(y_test, y_pred), }, index=["Logistic Regression"], ) # **DECISION TREE** # Load libraries import pandas as pd from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier from sklearn.model_selection import train_test_split # Import train_test_split function from sklearn import ( metrics, ) # Import scikit-learn metrics module for accuracy calculation X = data_log.drop(columns="y") # Features y = data_log["y"] # Target variable X, y = ros.fit_resample(X, y) X = pd.DataFrame(X) X.columns = [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42, stratify=y ) # param_dict={"criterion" : ['gini', 'entropy'], "max_depth":range(1,10),"min_samples_split":range(1,10),"min_samples_leaf":range(1,5) } # clf_GS = GridSearchCV( DecisionTreeClassifier(),param_grid= param_dict,cv=10,verbose=1,n_jobs=-1) # clf_GS.fit(X_train, y_train) # clf_GS.best_params_ # clf_GS.best_estimator_ # Create Decision Tree classifer object clf = DecisionTreeClassifier( ccp_alpha=0.0, class_weight=None, criterion="gini", max_depth=9, 
    max_features=None,
    max_leaf_nodes=None,
    min_impurity_decrease=0.0,
    min_samples_leaf=1,
    min_samples_split=3,
    min_weight_fraction_leaf=0.0,
    random_state=None,
    splitter="best",
)  # min_impurity_split and presort are omitted: both were no-ops here and have been removed from recent scikit-learn releases

# Train Decision Tree Classifier
clf = clf.fit(X_train, y_train)

# Predict the response for the test dataset
y_pred = clf.predict(X_test)

# Model Accuracy: how often is the classifier correct?
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
dt_cr = classification_report(y_test, y_pred, output_dict=True)
perf_dt = pd.DataFrame(
    {
        "Train_Score": clf.score(X_train, y_train),
        "Test_Score": clf.score(X_test, y_test),
        "Precision_Score": precision_score(y_test, y_pred),
        "Recall_Score": recall_score(y_test, y_pred),
        "F1_Score": f1_score(y_test, y_pred),
    },
    index=["Decision Tree"],
)
cf = confusion_matrix(y_test, y_pred)
group_names = ["True Negative", "False Positive", "False Negative", "True Positive"]
group_counts = ["{0:0.0f}".format(value) for value in cf.flatten()]
group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)]
labels = [
    f"{v1}\n{v2}\n{v3}"
    for v1, v2, v3 in zip(group_names, group_counts, group_percentages)
]
labels = np.asarray(labels).reshape(2, 2)
sns.heatmap(cf, annot=labels, fmt="", cmap=palette)
print(classification_report(y_test, y_pred))

# **RANDOM FOREST**
X = data_log.drop("y", axis=1)
y = data_log["y"].copy()
X, y = ros.fit_resample(X, y)
X = pd.DataFrame(X)
feature_cols = [
    "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age",
    "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes",
    "education_1", "education_2", "education_3", "education_4", "cigsPerDay",
]
X.columns = feature_cols
X[feature_cols] = X[feature_cols].astype("category")
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42, stratify=y
)

# from sklearn.model_selection import RandomizedSearchCV
# number of trees in random forest
# n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
# number of features at every split
# max_features = ['auto', 'sqrt']
# max depth
# max_depth = [int(x) for x in np.linspace(100, 500, num=11)]
# max_depth.append(None)
# create random grid
# random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth}
# Random search of parameters
# rfc_random = RandomizedSearchCV(estimator=rfc, param_distributions=random_grid, n_iter=100, cv=3, verbose=2, random_state=42, n_jobs=-1)
# Fit the model
# rfc_random.fit(X_train, y_train)
# print results
# print(rfc_random.best_params_)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

rfc = RandomForestClassifier(n_estimators=200, max_features="sqrt", max_depth=None)
rfc.fit(X_train, y_train)
rfc_predict = rfc.predict(X_test)
rfc_cv_score = cross_val_score(rfc, X, y, cv=10, scoring="roc_auc")
print("=== Confusion Matrix ===")
print(confusion_matrix(y_test, rfc_predict))
print("\n")
print("=== Classification Report ===")
print(classification_report(y_test, rfc_predict))
print("\n")
print("=== All AUC Scores ===")
print(rfc_cv_score) print("\n") print("=== Mean AUC Score ===") print("Mean AUC Score - Random Forest: ", rfc_cv_score.mean()) sorted_idx = rfc.feature_importances_.argsort() plt.barh(data_log.columns[sorted_idx], rfc.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") # The most important 5 features are respectively age, body mass index, total cholesterol, systolic blood pressure, and glucose. cf = confusion_matrix(y_test, rfc_predict) group_names = ["True Negative", "False Positive", "False Negative", "True Positive"] group_counts = ["{0:0.0f}".format(value) for value in cf.flatten()] group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)] labels = [ f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names, group_counts, group_percentages) ] labels = np.asarray(labels).reshape(2, 2) sns.heatmap(cf, annot=labels, fmt="", cmap=palette) perf_rf = pd.DataFrame( { "Train_Score": rfc.score(X_train, y_train), "Test_Score": rfc.score(X_test, y_test), "Precision_Score": precision_score(y_test, rfc_predict), "Recall_Score": recall_score(y_test, rfc_predict), "F1_Score": f1_score(y_test, rfc_predict), }, index=["Random Forest"], ) # **SUPPORT VECTOR MACHINE** X = data_log.drop("y", axis=1).copy() y = data_log["y"].copy() X, y = ros.fit_resample(X, y) X = pd.DataFrame(X) X.columns = [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42, stratify=y ) from sklearn.svm import SVC # defining parameter range # param_grid = {'C': [0.1, 1, 10, 100, 1000], # 'gamma': [1, 0.1, 0.01, 0.001, 0.0001], # 'kernel': ['rbf']} # grid = GridSearchCV(SVC(), param_grid, refit = True, verbose = 3) # fitting the model for grid search # grid.fit(X_train, y_train) from sklearn.svm import SVC svclassifier = SVC(kernel="rbf", C=100, gamma=1) svclassifier.fit(X_train, y_train) y_pred = svclassifier.predict(X_test) from sklearn.metrics import classification_report, confusion_matrix cf = confusion_matrix(y_test, y_pred) print(classification_report(y_test, y_pred)) perf_svm = pd.DataFrame( { "Train_Score": svclassifier.score(X_train, y_train), "Test_Score": svclassifier.score(X_test, y_test), "Precision_Score": precision_score(y_test, y_pred), "Recall_Score": recall_score(y_test, y_pred), "F1_Score": f1_score(y_test, y_pred), }, index=["SVM"], ) group_names = ["True Negative", "False Positive", "False Negative", "True Positive"] group_counts = ["{0:0.0f}".format(value) for value in cf.flatten()] group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)] labels = [ f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names, group_counts, group_percentages) ] labels = np.asarray(labels).reshape(2, 2) sns.heatmap(cf, annot=labels, fmt="", cmap=palette) A = pd.concat([perf_nb, perf_lr, perf_dt, perf_rf, perf_knn, perf_svm]) A
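# The comparison table A above is assembled by hand, one perf_* frame per model. A
# compact alternative is to keep the fitted estimators in a dict and build the table
# in a loop. The sketch below does this on synthetic data with two simple models, so
# the numbers are illustrative only and the model names are placeholders.
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, f1_score

Xd, yd = make_classification(n_samples=400, n_features=8, random_state=0)
Xd_tr, Xd_te, yd_tr, yd_te = train_test_split(Xd, yd, test_size=0.25, random_state=0)

demo_models = {
    "Logistic Regression": LogisticRegression(max_iter=1000).fit(Xd_tr, yd_tr),
    "Decision Tree": DecisionTreeClassifier(max_depth=5, random_state=0).fit(Xd_tr, yd_tr),
}
rows = {}
for name, est in demo_models.items():
    pred = est.predict(Xd_te)
    rows[name] = {
        "Train_Score": est.score(Xd_tr, yd_tr),
        "Test_Score": est.score(Xd_te, yd_te),
        "Precision_Score": precision_score(yd_te, pred),
        "Recall_Score": recall_score(yd_te, pred),
        "F1_Score": f1_score(yd_te, pred),
    }
# one row per model, sorted by F1 so the ranking is easy to read
comparison = pd.DataFrame(rows).T.sort_values("F1_Score", ascending=False)
print(comparison)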
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/696/69696298.ipynb
heart-disease-prediction-using-logistic-regression
dileep070
[{"Id": 69696298, "ScriptId": 19040357, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2522717, "CreationDate": "08/02/2021 20:22:37", "VersionNumber": 3.0, "Title": "Coronary Heart Disease Prediction", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 1028.0, "LinesInsertedFromPrevious": 5.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 1023.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 93169601, "KernelVersionId": 69696298, "SourceDatasetVersionId": 478477}]
[{"Id": 478477, "DatasetId": 222487, "DatasourceVersionId": 494455, "CreatorUserId": 3280617, "LicenseName": "Unknown", "CreationDate": "06/07/2019 06:12:56", "VersionNumber": 1.0, "Title": "Logistic regression To predict heart disease", "Slug": "heart-disease-prediction-using-logistic-regression", "Subtitle": "heart disease prediction", "Description": "**LOGISTIC REGRESSION - HEART DISEASE PREDICTION**\n\n**Introduction**\nWorld Health Organization has estimated 12 million deaths occur worldwide, every year due to Heart diseases. Half the deaths in the United States and other developed countries are due to cardio vascular diseases. The early prognosis of cardiovascular diseases can aid in making decisions on lifestyle changes in high risk patients and in turn reduce the complications. This research intends to pinpoint the most relevant/risk factors of heart disease as well as predict the overall risk using logistic regression\nData Preparation\n \n\nSource\nThe dataset is publically available on the Kaggle website, and it is from an ongoing cardiovascular study on residents of the town of Framingham, Massachusetts. The classification goal is to predict whether the patient has 10-year risk of future coronary heart disease (CHD).The dataset provides the patients\u2019 information. It includes over 4,000 records and 15 attributes.\nVariables \nEach attribute is a potential risk factor. There are both demographic, behavioral and medical risk factors.\n\n\nDemographic:\n\u2022\tSex: male or female(Nominal)\n\u2022\tAge: Age of the patient;(Continuous - Although the recorded ages have been truncated to whole numbers, the concept of age is continuous)\nBehavioral\n\u2022\tCurrent Smoker: whether or not the patient is a current smoker (Nominal)\n\u2022\tCigs Per Day: the number of cigarettes that the person smoked on average in one day.(can be considered continuous as one can have any number of cigarettes, even half a cigarette.)\nMedical( history)\n\u2022\tBP Meds: whether or not the patient was on blood pressure medication (Nominal)\n\u2022\tPrevalent Stroke: whether or not the patient had previously had a stroke (Nominal)\n\u2022\tPrevalent Hyp: whether or not the patient was hypertensive (Nominal)\n\u2022\tDiabetes: whether or not the patient had diabetes (Nominal)\nMedical(current)\n\u2022\tTot Chol: total cholesterol level (Continuous)\n\u2022\tSys BP: systolic blood pressure (Continuous)\n\u2022\tDia BP: diastolic blood pressure (Continuous)\n\u2022\tBMI: Body Mass Index (Continuous)\n\u2022\tHeart Rate: heart rate (Continuous - In medical research, variables such as heart rate though in fact discrete, yet are considered continuous because of large number of possible values.)\n\u2022\tGlucose: glucose level (Continuous)\nPredict variable (desired target)\n\u2022\t10 year risk of coronary heart disease CHD (binary: \u201c1\u201d, means \u201cYes\u201d, \u201c0\u201d means \u201cNo\u201d)\nLogistic Regression\nLogistic regression is a type of regression analysis in statistics used for prediction of outcome of a categorical dependent variable from a set of predictor or independent variables. In logistic regression the dependent variable is always binary. Logistic regression is mainly used to for prediction and also calculating the probability of success.\nThe results above show some of the attributes with P value higher than the preferred alpha(5%) and thereby showing low statistically significant relationship with the probability of heart disease. 
Backward elimination approach is used here to remove those attributes with highest P-value one at a time followed by running the regression repeatedly until all attributes have P Values less than 0.05.\nFeature Selection: Backward elimination (P-value approach)\nLogistic regression equation\nP=e\u03b20+\u03b21X1/1+e\u03b20+\u03b21X1P=e\u03b20+\u03b21X1/1+e\u03b20+\u03b21X1\nWhen all features plugged in:\nlogit(p)=log(p/(1\u2212p))=\u03b20+\u03b21\u2217Sexmale+\u03b22\u2217age+\u03b23\u2217cigsPerDay+\u03b24\u2217totChol+\u03b25\u2217sysBP+\u03b26\u2217glucoselogit(p)=log(p/(1\u2212p))=\u03b20+\u03b21\u2217Sexmale+\u03b22\u2217age+\u03b23\u2217cigsPerDay+\u03b24\u2217totChol+\u03b25\u2217sysBP+\u03b26\u2217glucose\n\n\n\n\n\n\n\n\n\n\nInterpreting the results: Odds Ratio, Confidence Intervals and P-values\n\u2022\tThis fitted model shows that, holding all other features constant, the odds of getting diagnosed with heart disease for males (sex_male = 1)over that of females (sex_male = 0) is exp(0.5815) = 1.788687. In terms of percent change, we can say that the odds for males are 78.8% higher than the odds for females. \n\u2022\tThe coefficient for age says that, holding all others constant, we will see 7% increase in the odds of getting diagnosed with CDH for a one year increase in age since exp(0.0655) = 1.067644. \n\u2022\tSimilarly , with every extra cigarette one smokes thers is a 2% increase in the odds of CDH. \n\u2022\tFor Total cholesterol level and glucose level there is no significant change. \n\n\u2022\tThere is a 1.7% increase in odds for every unit increase in systolic Blood Pressure.\n\nModel Evaluation - Statistics\nFrom the above statistics it is clear that the model is highly specific than sensitive. The negative values are predicted more accurately than the positives.\nPredicted probabilities of 0 (No Coronary Heart Disease) and 1 ( Coronary Heart Disease: Yes) for the test data with a default classification threshold of 0.5\nlower the threshold\nSince the model is predicting Heart disease too many type II errors is not advisable. A False Negative ( ignoring the probability of disease when there actually is one) is more dangerous than a False Positive in this case. Hence in order to increase the sensitivity, threshold can be lowered.\n\n\n\nConclusions\n\u2022\tAll attributes selected after the elimination process show P-values lower than 5% and thereby suggesting significant role in the Heart disease prediction.\n\n\u2022\tMen seem to be more susceptible to heart disease than women. Increase in age, number of cigarettes smoked per day and systolic Blood Pressure also show increasing odds of having heart disease\n\n\u2022\tTotal cholesterol shows no significant change in the odds of CHD. This could be due to the presence of 'good cholesterol(HDL) in the total cholesterol reading. Glucose too causes a very negligible change in odds (0.2%)\n\n\u2022\tThe model predicted with 0.88 accuracy. The model is more specific than sensitive. Overall model could be improved with more data\n\nAppendix\nhttp://www.who.int/mediacentre/factsheets/fs317/en/\nData Source References\nhttps://www.kaggle.com/amanajmera1/framingham-heart-study-dataset/data", "VersionNotes": "Initial release", "TotalCompressedBytes": 195955.0, "TotalUncompressedBytes": 195955.0}]
[{"Id": 222487, "CreatorUserId": 3280617, "OwnerUserId": 3280617.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 478477.0, "CurrentDatasourceVersionId": 494455.0, "ForumId": 233601, "Type": 2, "CreationDate": "06/07/2019 06:12:56", "LastActivityDate": "06/07/2019", "TotalViews": 249932, "TotalDownloads": 35877, "TotalVotes": 376, "TotalKernels": 176}]
[{"Id": 3280617, "UserName": "dileep070", "DisplayName": "Dileep", "RegisterDate": "05/28/2019", "PerformanceTier": 0}]
import numpy as np import pandas as pd import matplotlib.pyplot as plt from tabulate import tabulate import missingno as msno import tabulate as tb import statsmodels.api as sm from statsmodels.formula.api import ols import scipy.stats as stats import statsmodels.stats.multicomp as multi from sklearn import preprocessing import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split import seaborn as sns data = pd.DataFrame( pd.read_csv( "../input/heart-disease-prediction-using-logistic-regression/framingham.csv" ) ) display(data) data.shape data.dtypes np.sum(data.isnull()) msno.matrix(data) # In the above graph, white lines represent missing values and their location. As can be seen above, the variable with the most missing values is glucose while 9 other variables don't have any such as gender, age, smoking status. In this project, missing values will be imputed by the appropriate method. # Descriptive statistics of numeric variables data[ ["age", "cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].dropna().describe() # The table of 10-year risk of coronary heart disease and gender shows the number of people in each group. By that, we can conclude that men in the data who have a 10-year risk of coronary heart disease are greater than women while for the other group that has no risk it is the opposite. # Imputed data data_wo_na = data.copy() data_wo_na["cigsPerDay"] = data_wo_na["cigsPerDay"].fillna( data_wo_na["cigsPerDay"].mode().iloc[0] ) data_wo_na["totChol"] = data_wo_na["totChol"].fillna(data_wo_na["totChol"].median()) data_wo_na["BMI"] = data_wo_na["BMI"].fillna(data_wo_na["BMI"].median()) data_wo_na["heartRate"] = data_wo_na["heartRate"].fillna( data_wo_na["heartRate"].median() ) data_wo_na["glucose"] = data_wo_na["glucose"].fillna(data_wo_na["glucose"].median()) data_wo_na["education"] = data_wo_na["education"].fillna( data_wo_na["education"].mode().iloc[0] ) data_wo_na["BPMeds"] = data_wo_na["BPMeds"].fillna(0) bins = [29, 39, 49, 59, 69, 79] labels = ["30-39", "40-49", "50-59", "60-69", "70-79"] data["agerange"] = pd.cut(data.age, bins, labels=labels, include_lowest=True) bins = [29, 39, 49, 59, 69, 79] labels = ["30-39", "40-49", "50-59", "60-69", "70-79"] data_wo_na["agerange"] = pd.cut(data.age, bins, labels=labels, include_lowest=True) data[ [ "male", "education", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "TenYearCHD", "education", ] ] = data[ [ "male", "education", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "TenYearCHD", "education", ] ].astype( "category" ) print(data.dtypes) data_wo_na[ [ "male", "education", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "TenYearCHD", "education", ] ] = data_wo_na[ [ "male", "education", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "TenYearCHD", "education", ] ].astype( "category" ) print(data_wo_na.dtypes) data.groupby("TenYearCHD").mean() # All numeric variables are higher in the group who have a 10-year risk of coronary heart disease. 
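# The imputation above fills each column by hand with fillna. An equivalent, more
# compact route is sklearn's SimpleImputer, which can also be dropped into a Pipeline
# later so the medians/modes are learned from the training split only. This is only a
# sketch on a toy frame with made-up values, not a change to the analysis above.
import pandas as pd
from sklearn.impute import SimpleImputer

toy = pd.DataFrame(
    {
        "glucose": [77.0, None, 85.0, 99.0],     # continuous, impute with median
        "education": [1.0, 2.0, None, 2.0],      # categorical, impute with mode
    }
)
num_imputer = SimpleImputer(strategy="median")
cat_imputer = SimpleImputer(strategy="most_frequent")
toy[["glucose"]] = num_imputer.fit_transform(toy[["glucose"]])
toy[["education"]] = cat_imputer.fit_transform(toy[["education"]])
print(toy)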
# Descriptive statistics after imputation data_wo_na[ ["age", "cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].describe() # **EDA-RESEARCH QUESTIONS** # *1) How does the 10-year risk of coronary heart disease change by other variables in the data?* palette = sns.color_palette("mako_r", 6) sns.catplot(x="TenYearCHD", kind="count", palette=palette, data=data) TenYearCHD_table = pd.crosstab(index=data["TenYearCHD"], columns="count") sns.heatmap(TenYearCHD_table, cmap=palette, annot=True, fmt="g") # As it can be seen in the above graph and the frequency table, data consist of 3594 people who don't have a 10-year risk of coronary heart disease and 644 people who have the risk. # GENDER x, y, hue = "TenYearCHD", "proportion", "male" hue_order = ["Female", "Male"] ( data[x] .groupby(data[hue]) .value_counts(normalize=True) .rename(y) .reset_index() .pipe((sns.barplot, "data"), x=x, y=y, hue=hue, palette=palette) ) # Most of the people who don't have a 10-year risk of coronary heart disease are female while the ones who have the risk are generally male. from scipy.stats import chi2_contingency c = pd.crosstab(index=data["male"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was rejected, that is, the 10-year risk of coronary heart disease and gender have a significant relation. # AGE age_CHD_table = pd.crosstab(index=data["agerange"], columns=data["TenYearCHD"]) sns.heatmap(age_CHD_table, cmap=palette, annot=True, fmt="g") x, y, hue = "TenYearCHD", "proportion", "agerange" hue_order = ["30-39", "40-49", "50-59", "60-69", "70-79"] ( data[x] .groupby(data[hue]) .value_counts(normalize=True) .rename(y) .reset_index() .pipe((sns.barplot, "data"), x=x, y=y, hue=hue, palette=palette) ) # In the above bar plots, the first one represents the frequency of 10-year risk of CHD in each age group while the second one shows the proportions. # By age frequency table and the first graph, it can be said that 40-49 age group is the most crowded one while age group 70-79 have only 2 people. # The age group and CHD table and the second graph show that age and the 10-year risk of coronary heart disease are directly proportional. In the age group 70-79, the risk increases to 50 percent while in the age group 30-39 it is less than 5 percent. # EDUCATION x, y, hue = "TenYearCHD", "proportion", "education" hue_order = [1, 2, 3, 4] ( data[x] .groupby(data[hue]) .value_counts(normalize=True) .rename(y) .reset_index() .pipe((sns.barplot, "data"), x=x, y=y, hue=hue, palette=palette) ) # The proportion of the 10-year risk of coronary heart disease by education graph shows the percentages of 10-year risk of coronary heart disease in each education group. There is no huge difference between those percentages but the greatest risk is in education group 1 while the lowest is in group 2. 
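# The chi-square tests in this section report only a p-value; with roughly 4,000 rows
# even small differences can come out "significant". A common companion measure, not
# computed in the original analysis, is Cramér's V as an effect size. A small sketch
# on a toy contingency table with made-up counts:
import numpy as np
from scipy.stats import chi2_contingency


def chi2_with_cramers_v(table):
    """Return the chi-square statistic, p-value and Cramér's V for a contingency table."""
    chi2, p, dof, expected = chi2_contingency(table)
    table = np.asarray(table)
    n = table.sum()
    r, c = table.shape
    v = np.sqrt(chi2 / (n * (min(r, c) - 1)))
    return chi2, p, v


toy_table = np.array([[1450, 250], [2144, 394]])  # made-up 2x2 counts
print(chi2_with_cramers_v(toy_table))
# In this notebook the same helper could be called as, for example,
# chi2_with_cramers_v(pd.crosstab(data["male"], data["TenYearCHD"])).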
c = pd.crosstab(index=data["education"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was rejected, that is, the 10-year risk of coronary heart disease and education have a significant relation. # PREVALENT STROKE c = pd.crosstab(index=data["prevalentStroke"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") # The data consist of only 25 people who had a stroke before and 44 percent of them have a 10-year risk of coronary heart disease. This seems like a big percentage but since the sample is very small, it did not give a considerable meaning apart from this data. stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was rejected, that is, the 10-year risk of coronary heart disease and prevalent stroke have a significant relation. # PREVALENT HYPERTENSION c = pd.crosstab(index=data["prevalentHyp"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was rejected, that is, the 10-year risk of coronary heart disease and prevalent hypertension have a significant relation. # DIABETES c = pd.crosstab(index=data["diabetes"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") # The table above shows the frequencies of people who have diabetes or not by the 10-year risk of coronary heart disease. And by that, it can be concluded that there is a huge difference in percentages for people who have diabetes. For people who don't have diabetes, the risk is 14.6 percent while for the other group the risk is 63.3 percent. stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was rejected, that is, the 10-year risk of coronary heart disease and diabetes have a significant relation. # SMOKING STATUS sns.catplot( x="currentSmoker", hue="TenYearCHD", kind="count", palette=palette, data=data ) # The above table shows the change in 10-year risk of coronary heart disease by smoking status. It can be said that smoking status has an inconsiderable effect on the risk in the data. c = pd.crosstab(index=data["currentSmoker"], columns=data["TenYearCHD"]) sns.heatmap(c, cmap=palette, annot=True, fmt="g") stat, p, dof, expected = chi2_contingency(c) # interpret p-value alpha = 0.05 print("Chi-Square Test Result") print("p value is " + str(p)) if p <= alpha: print("Dependent (reject H0)") else: print("Independent (H0 holds true)") # Therefore, H0 was accepted, that is, the 10-year risk of coronary heart disease and smoking status does not have a significant relation. 
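# Several of the crosstabs above have sparse cells (prevalentStroke has only 25
# positive cases), and the chi-square approximation assumes expected counts of at
# least about 5 per cell. Below is a defensive sketch, on a toy 2x2 table with
# made-up counts, that falls back to Fisher's exact test when that assumption fails;
# this check is not part of the original analysis.
import numpy as np
from scipy.stats import chi2_contingency, fisher_exact


def association_test_2x2(table):
    """Chi-square when expected counts are large enough, Fisher's exact otherwise."""
    table = np.asarray(table)
    chi2, p, dof, expected = chi2_contingency(table)
    if (expected >= 5).all():
        return "chi-square", p
    odds_ratio, p_exact = fisher_exact(table)
    return "fisher-exact", p_exact


small_table = np.array([[3580, 14], [633, 11]])  # made-up sparse counts
print(association_test_2x2(small_table))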
# BODY MASS INDEX ager_10ychd = pd.crosstab( index=data["agerange"], columns=data["TenYearCHD"], values=data["BMI"], aggfunc=np.mean, ).round(0) sns.heatmap(ager_10ychd, cmap=palette, annot=True, fmt="g") sns.catplot( x="TenYearCHD", y="BMI", hue="agerange", kind="box", data=data, palette=palette ) # The body mass index box plot shows the distributions for the 10-year risk of coronary heart disease by age group. # In the age group 30-39; There is not much difference in the median but for the risk group, it is slightly higher. Minimum, maximum values, and the first quartile are lower in the risk group but the third quartile is higher. # In the age group 40-49; The distribution for the risk group is almost the same as the group of people who don't have the risk. # In the age group 50-59; The distribution for the risk group is almost the same as the group of people who don't have the risk but the first and third quartile is slightly higher in the risk group. # In the age group 60-69; There is not much difference in the median and minimum value. Maximum values and the first quartile are lower in the risk group but the third quartile is higher. # GLUCOSE, TOTAL CHOLESTEROL, SYSTOLIC BLOOD PRESSURE, DIASTOLIC BLOOD PRESSURE, HEART RATE import matplotlib.pyplot as plt import seaborn as sns fig, axes = plt.subplots(2, 3, figsize=(18, 10)) sns.boxplot( ax=axes[0, 0], data=data_wo_na, y="glucose", x="TenYearCHD", palette=palette ) sns.boxplot( ax=axes[0, 1], data=data_wo_na, y="totChol", x="TenYearCHD", palette=palette ) sns.boxplot(ax=axes[1, 0], data=data_wo_na, y="sysBP", x="TenYearCHD", palette=palette) sns.boxplot(ax=axes[1, 1], data=data_wo_na, y="diaBP", x="TenYearCHD", palette=palette) sns.boxplot( ax=axes[0, 2], data=data_wo_na, y="heartRate", x="TenYearCHD", palette=palette ) # The above box plots show the distributions in each variable by the 10-year risk of coronary heart disease. # **Glucose-10 Year Risk of Coronary Heart Disease:** The distributions for both the risk group and the group of people who don't have the risk are almost the same except the third quartile which is greater and the maximum value which is slightly greater for the risk group. Both groups have so many outliers. # **Total Cholesterol-10 Year Risk of Coronary Heart Disease:** The distributions for both the risk group and the group of people who don't have the risk are almost the same. Both groups have so many outliers. # **Heart Rate-10 Year Risk of Coronary Heart Disease:** The distributions for both the risk group and the group of people who don't have the risk are almost the same except for the third quartile, minimum and maximum values which are slightly greater for the risk group. Both groups have so many outliers especially the group of people who don't have the risk. # **Systolic Blood Pressure-10 Year Risk of Coronary Heart Disease:** In the group that doesn't have a 10-year risk of coronary heart disease, the median is about 130 while in the other group that has a 10-year risk of coronary heart disease it is almost 150. The minimum systolic blood pressure value for both two groups are the same while the maximum value is much higher in the risk group. Also, the first and third quartiles are so much higher in the risk group. # **Diastolic Blood Pressure-10 Year Risk of Coronary Heart Disease:** Like systolic blood pressure, diastolic blood pressure's median, max, first quartile, and third quartile values are higher for the risk group. 
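# The box plots above are a visual comparison only. A nonparametric two-sample test
# such as Mann-Whitney U can back them up without assuming normality. The sketch below
# uses synthetic values; in the notebook the same call could be applied to each
# continuous column of data_wo_na split by TenYearCHD.
import numpy as np
from scipy.stats import mannwhitneyu

rng = np.random.default_rng(42)
no_risk = rng.normal(loc=132, scale=20, size=300)    # stand-in for sysBP, TenYearCHD == 0
with_risk = rng.normal(loc=145, scale=22, size=60)   # stand-in for sysBP, TenYearCHD == 1

stat, p = mannwhitneyu(no_risk, with_risk, alternative="two-sided")
print("Mann-Whitney U p-value:", p)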
# 2) Does smoking status and the number of cigarettes smoked in a day affect heart rate and systolic blood pressure? data["cigsPerDay"].value_counts() bins = [0, 1, 3, 7, 11, 15, 19, 23, 27, 30, 39, 49, 80] labels = [ "0", "1-3", "4-7", "8-11", "12-15", "16-19", "20-23", "24-27", "27-30", "31-39", "40-49", "50+", ] data["cigrange"] = pd.cut(data.cigsPerDay, bins, labels=labels, include_lowest=True) data[["cigrange", "cigsPerDay"]] data_wo_na["cigsPerDay"].value_counts() bins = [0, 1, 3, 7, 11, 15, 19, 23, 27, 30, 39, 49, 80] labels = [ "0", "1-3", "4-7", "8-11", "12-15", "16-19", "20-23", "24-27", "27-30", "31-39", "40-49", "50+", ] data_wo_na["cigrange1"] = pd.cut( data_wo_na.cigsPerDay, bins, labels=labels, include_lowest=True ) data_wo_na[["cigrange1", "cigsPerDay"]] # Groups that have the range of cigarettes smoked in a day were created to see the results in graphs without a mess. # HEART RATE cgr_10ychd = pd.crosstab( index=data_wo_na["cigrange1"], columns=data_wo_na["agerange"], values=data_wo_na["heartRate"], aggfunc=np.mean, ).round(0) sns.heatmap(cgr_10ychd, cmap=palette, annot=True, fmt="g") sns.catplot( x="cigrange1", y="heartRate", aspect=1.5, kind="box", data=data_wo_na, palette="viridis", ) # The above box plot shows the distribution of heart rate and the range of cigarettes smoked in a day. Except for the 31-39 and 50+ groups, every group has almost the same median heart rate. The comments for each group were written as a comparison with the previous one. # * For the group who are non-smokers, the box plot shows a symmetric distribution. Also, this group has many outliers and this can be because of other variables such as age. Without outliers, the minimum value for this group is almost 45 which is very low even in resting. The maximum value is near 105 and the median is near 75. # * For the group 1-3, while the minimum heart rate increased, the maximum heart rate decreased. There is a left-skewed distribution in this group and this means that there is an agglomeration in between the median and the third quartile. So, we can say that in this group there are more people who have heartrate above the median than who have heartrate below the median. # * For the group 4-7, minimum and maximum values for heart rate are almost the same as the 1-3 group but the first and third quartiles are higher. This group also has an almost symmetric distribution. # * For the group 8-11, while the minimum value is lower the maximum value is higher than the previous group. The first and third quartiles are almost the same except the third quartile is a little lower in the 8-11. This group also looks symmetric. # * For the group 12-15 there is an increase in the minimum value but the maximum value remains the same as the previous group. The first and third quartiles are higher and there is a right-skewed distribution in this group. This means there is an agglomeration between the first quartile and the median. # * For the groups 16-19 the range of the minimum and maximum is getting the smallest. Also, the interquartile range is the smallest too. The distribution looks symmetric and there are only 2 outliers that are lower than the minimum value and very higher than the maximum. This can be a cause of other factors such as age. # * For the group 20-23 the distribution and descriptive statistics are almost equal to the group 12-15. But there are so many outliers in this case that are higher than the maximum value. 
# * For the group 24-27 the minimum value is much higher while the maximum is much lower than the previous one. The first quartile is almost the same but the third quartile is a little higher. There is a right-skewed distribution in this group which means there is an agglomeration in between the first quartile and the median. # * For the group 27-30 the minimum value is much lower while the maximum is much higher than the previous one. While the third quartile is almost the same, the first quartile is a little lower. There is also right-skewed distribution but not so obvious like the previous group. # * For the group 31-39 the median is lower than previous ones and there is a really obvious decrease in the maximum value which is confusing because it is expected that the number of cigarettes smoked and the heart rate are directly proportional. It can be because of other variables. The minimum value remains the same. There is a right-skewed distribution as well. # * For the group 40-49 there is almost the same distribution as the 27-30 except the first quartile and the minimum value which are a little higher. # * For the 50+ group the median and ranges between maximum and minimum values getting smaller. There is an obvious right-skewed distribution which means there is an agglomeration in between the first quartile and the median. plt.hist(data_wo_na["heartRate"]) stats.shapiro(data_wo_na["heartRate"]) # By the above histogram, it can be seen that heart rate is not normal. So the assumption of ANOVA, normality was not provided. Because of this, ANOVA can not be performed. Instead of ANOVA, Non-parametric Kruskal was used to see the differences between median systolic blood pressure in each age group. data_wo_na["cigrange1"].value_counts() data_wo_na["cigrange1"].value_counts() cr0 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "0"] cr1 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "1-3"] cr2 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "4-7"] cr3 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "8-11"] cr4 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "12-15"] cr5 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "16-19"] cr6 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "20-23"] cr7 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "24-27"] cr8 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "28-31"] cr9 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "31-39"] cr10 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "40-49"] cr11 = data_wo_na["heartRate"][data_wo_na["cigrange1"] == "50+"] cr0 = cr0.append(cr1) cr2 = cr2.append(cr3) cr4 = cr4.append(cr5) cr6 = cr6.append(cr8) cr8 = cr8.append(cr10) cr10 = cr10.append(cr11) # perform Kruskal-Wallis Test stats.kruskal(cr0, cr2, cr4, cr6, cr8, cr10) # Since the p-value of the Kruskal Wallis H Test is smaller than 0.05, the null hypothesis can be rejected. This means the median heart rate differ by the number of cigarettes smoked in a day. # SYSTOLIC BLOOD PRESSURE cgr_10ychdsys = pd.crosstab( index=data_wo_na["cigrange1"], columns=data_wo_na["agerange"], values=data_wo_na["sysBP"], aggfunc=np.mean, ).round(0) sns.heatmap(cgr_10ychdsys, cmap=palette, annot=True, fmt="g") sns.catplot( x="cigrange", y="sysBP", aspect=1.5, kind="box", data=data, palette="viridis" ) # The above box plot shows the distribution of systolic blood pressure and the range of cigarettes smoked in a day. The comments for each group were written as a comparison with the previous one. 
# * For the group who are non-smokers, the box plot shows a symmetric distribution. This group has many outliers and this can be because of other variables such as age. Without outliers, the minimum value for this group is almost 75. The maximum value is near 185 and the median is near 130. # * For the group 1-3, while the minimum systolic blood pressure(near 100) increased, the maximum systolic blood pressure(near 180) and the median(near 130) decreased. The distribution looks symmetric. # * For the group 4-7, minimum, maximum, first quartile, and third quartile are all lower than the previous group. This group also has an almost symmetric distribution. # * For the group 8-11, the minimum value and the maximum values are lower than the previous group. The first and third quartiles are almost the same. There is a right-skewed distribution which means there is an agglomeration between the first quartile and the median. # * For the group 12-15 there is a decrease in the minimum and the maximum values. The first and third quartiles are almost the same and there is an almost symmetric distribution in this group. # * For the groups 16-19 the minimum, maximum, first, and third quartiles and also median are higher. There is a right-skewed distribution. # * For the group 20-23 all descriptive statistics are lower than the previous group. # * For the group 24-27 the minimum value is much higher while the maximum is much lower than the previous one. The first quartile is almost the same but the third quartile is a little lower. There is a right-skewed distribution in this group which means there is an agglomeration in between the first quartile and the median. # * For the group 27-30 the minimum value is much lower while the maximum is much higher than the previous one. While the first quartile is almost the same, the first quartile is a little higher. There is an almost symmetric distribution. # * For the group 31-39 all descriptive statistics are higher except the first quartile and the distribution looks symmetric. # * For the group 40-49 all descriptive statistics are lower except the first quartile and the distribution looks symmetric. # * For the 50+ group the median and ranges between maximum and minimum values getting smaller. There is an obvious right-skewed distribution which means there is an agglomeration in between the first quartile and the median. plt.hist(data_wo_na["sysBP"]) stats.shapiro(data_wo_na["sysBP"]) # By the above histogram, it can be seen that systolic blood pressure is not normal. So the assumption of ANOVA, normality was not provided. Because of this, ANOVA can not be performed. Instead of ANOVA, Non-parametric Kruskal was used to see the differences between median systolic blood pressure in each age group. 
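# The Shapiro-Wilk test above is run on the pooled column; strictly, the ANOVA
# normality assumption concerns the distribution within each group. Below is a small
# per-group check on synthetic, skewed data (not part of the original analysis); the
# same loop could be pointed at data_wo_na.groupby("cigrange1")["sysBP"].
import numpy as np
from scipy.stats import shapiro

rng = np.random.default_rng(0)
demo_groups = {
    "0": rng.lognormal(mean=4.85, sigma=0.15, size=200),
    "1-3": rng.lognormal(mean=4.88, sigma=0.15, size=80),
    "4-7": rng.lognormal(mean=4.90, sigma=0.15, size=60),
}
for name, values in demo_groups.items():
    stat, p = shapiro(values)
    print(f"group {name}: Shapiro-Wilk p = {p:.4f}")  # small p suggests non-normal within group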
data_wo_na["cigrange1"].value_counts() cr0 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "0"] cr1 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "1-3"] cr2 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "4-7"] cr3 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "8-11"] cr4 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "12-15"] cr5 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "16-19"] cr6 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "20-23"] cr7 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "24-27"] cr8 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "28-31"] cr9 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "31-39"] cr10 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "40-49"] cr11 = data_wo_na["sysBP"][data_wo_na["cigrange1"] == "50+"] cr0 = cr0.append(cr1) cr2 = cr2.append(cr3) cr4 = cr4.append(cr5) cr6 = cr6.append(cr8) cr8 = cr8.append(cr10) cr10 = cr10.append(cr11) # perform Kruskal-Wallis Test stats.kruskal(cr0, cr2, cr4, cr6, cr8, cr10) # Since the p-value of the Kruskal Wallis H Test is smaller than 0.05, the null hypothesis can be rejected. This means the median systolic blood pressure differs by the number of cigarettes smoked in a day. # 3) Does smoking status and the number of cigarettes smoked change by gender, age and education? x, y, hue = "currentSmoker", "proportion", "male" hue_order = ["Male", "Female"] ( data[x] .groupby(data[hue]) .value_counts(normalize=True) .rename(y) .reset_index() .pipe((sns.barplot, "data"), x=x, y=y, hue=hue, palette=palette) ) # The above graph shows the proportion of people in the status smokers and non-smokers while the colors show the gender in each group. In this data, almost 60 percent of people who are smokers are female while more than 60 percent of people who are not smokers are male. x, y, hue = "currentSmoker", "proportion", "agerange" hue_order = ["30-39", "40-49", "50-59", "60-69", "70-79"] ( data[x] .groupby(data[hue]) .value_counts(normalize=True) .rename(y) .reset_index() .pipe((sns.barplot, "data"), x=x, y=y, hue=hue, palette=palette) ) # Similar to the previous graph, the above graph shows the proportion for smoking status. But in this case, it shows the proportion of age groups instead of gender. In the x axis, 0 represents non-smokers while 1 represents smokers. # All people in the age group 70-79 are smokers by this graph. But it is not correct to conclude with this result since there are only 2 people in that group in the data. # In the age groups 60-69 and 50-59, the majority of people are smokers while for the age groups 40-49 and 30-39 it is the opposite. But generally, the highest percentage of people who are smokers are in the 60-69 age group if 70-79 will not be included. sns.catplot( x="cigrange", kind="count", hue="male", aspect=1.5, data=data, palette=palette ) # The range of cigarettes smoked in a day and gender graph shows the following conclusions; # * Women in the data who don't smoke are more than 2 times of men who are non-smokers. Also, it can be said that the people who don't smoke are the majority. # * In the range of 1-15 cigarettes smoked in a day, the majority are women. # * In the range of 16-19 and 31-39 cigarettes smoked in a day, there is not any female. # * In the range of 20-40+ cigarettes smoked in a day, the majority are men. # A simple conclusion can be made by looking at these results. And this is that men tend to smoke more cigarettes than women in this data. 
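# The proportion bar charts above are built with a groupby/value_counts pipe. The same
# numbers can be read directly from a normalized crosstab, which is often easier to
# inspect as a table. Toy sketch with made-up rows; in the notebook this would be
# pd.crosstab(data["cigrange"], data["male"], normalize="index").
import pandas as pd

toy = pd.DataFrame(
    {
        "cigrange": ["0", "0", "1-3", "20-23", "20-23", "20-23"],
        "male": [0, 1, 0, 1, 1, 0],
    }
)
share_by_range = pd.crosstab(toy["cigrange"], toy["male"], normalize="index")
print(share_by_range)  # each row sums to 1: share of women/men within a cigarette range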
# EDUCATION from matplotlib import cm # Prepare Data df = data.groupby("education").size() # Make the plot with pandas df.plot(kind="pie", subplots=True, figsize=(8, 8), cmap="crest", autopct="%1.1f%%") plt.title("Pie Chart of Education") plt.ylabel("") plt.show() sns.catplot( x="education", y="cigsPerDay", kind="bar", aspect=1.5, data=data, palette=palette ) # Actually, it is expected that while education levels go higher the number of cigarettes smoked per day will decrease. But above bar plot shows that there is not a relationship like that in this data. The education level that has the most cigarettes in a day is level 2 while the second one is level 4 and the least is level 3. sns.catplot( x="education", hue="currentSmoker", kind="count", aspect=1.5, data=data, palette=palette, ) # The above plot shows the frequencies of smoking status in each education level. The highest percentage of smokers are in the education level 2 while the second one is in the level 4 and the least one is in the level 3. # 4) Does age affect other variables? # HEART RATE sns.catplot( x="agerange", y="heartRate", aspect=1.5, kind="box", data=data_wo_na, palette="viridis", ) # The medians of heart rate are almost the same for all age groups except 70-79. In each group, the distribution looks symmetric. The range for the group 70-79 is the smallest but since there are only 2 people in that group, it doesn't mean anything concrete. # It is known that the heart rate is not normal. So the assumption of ANOVA, normality was not provided. Because of this, ANOVA can not be performed. Instead of ANOVA, Non-parametric Kruskal was used to see the differences between median heart rates in each age group. data_wo_na["agerange"].value_counts() ar3 = data_wo_na["heartRate"][data_wo_na["agerange"] == "30-39"] ar4 = data_wo_na["heartRate"][data_wo_na["agerange"] == "40-49"] ar5 = data_wo_na["heartRate"][data_wo_na["agerange"] == "50-59"] ar6 = data_wo_na["heartRate"][data_wo_na["agerange"] == "60-69"] ar7 = data_wo_na["heartRate"][data_wo_na["agerange"] == "70-79"] # perform Kruskal-Wallis Test stats.kruskal(ar3, ar4, ar5, ar6, ar7) # Since the p-value of the Kruskal Wallis H Test is greater than 0.05, the null hypothesis cannot be rejected. This means heart rates don't change by age groups. # SYSTOLIC BLOOD PRESSURE sns.catplot( x="agerange", y="sysBP", aspect=1.5, kind="box", data=data_wo_na, palette="viridis" ) # Median of systolic blood pressure, interquartile ranges, minimum and maximum values are getting higher until the group 70-79 except for the 50-59's min value. The distributions of all groups look symmetric. data_wo_na["agerange"].value_counts() ar3 = data_wo_na["sysBP"][data_wo_na["agerange"] == "30-39"] ar4 = data_wo_na["sysBP"][data_wo_na["agerange"] == "40-49"] ar5 = data_wo_na["sysBP"][data_wo_na["agerange"] == "50-59"] ar6 = data_wo_na["sysBP"][data_wo_na["agerange"] == "60-69"] ar7 = data_wo_na["sysBP"][data_wo_na["agerange"] == "70-79"] # perform Kruskal-Wallis Test stats.kruskal(ar3, ar4, ar5, ar6, ar7) # Since the p-value of the Kruskal Wallis H Test is smaller than 0.05, the null hypothesis can be rejected. This means systolic blood pressure differ by age groups. sns.catplot( x="agerange", y="glucose", aspect=1.5, kind="box", data=data_wo_na, palette="viridis", ) # Almost all desciptives are equal for each group. plt.hist(data_wo_na["glucose"]) # By the above histogram, it can be seen that glucose is not normal. So the assumption of ANOVA, normality was not provided. 
Because of this, ANOVA can not be performed. Instead of ANOVA, Non-parametric Kruskal was used to see the differences between median glucose in each age group. data_wo_na["agerange"].value_counts() ar3 = data_wo_na["glucose"][data_wo_na["agerange"] == "30-39"] ar4 = data_wo_na["glucose"][data_wo_na["agerange"] == "40-49"] ar5 = data_wo_na["glucose"][data_wo_na["agerange"] == "50-59"] ar6 = data_wo_na["glucose"][data_wo_na["agerange"] == "60-69"] ar7 = data_wo_na["glucose"][data_wo_na["agerange"] == "70-79"] # perform Kruskal-Wallis Test stats.kruskal(ar3, ar4, ar5, ar6, ar7) # Since the p-value of the Kruskal Wallis H Test is smaller than 0.05, the null hypothesis can be rejected. This means glucose differ by age groups. # **PRE-PROCESSING** plt.rc("font", size=14) sns.set(style="white") sns.set(style="whitegrid", color_codes=True) data_wo_na = data_wo_na.rename(columns={"TenYearCHD": "y"}) data_wo_na = data_wo_na.drop(columns=["agerange", "cigrange1"]) education = pd.get_dummies(data_wo_na["education"]) education.columns = ["education_1", "education_2", "education_3", "education_4"] education data_wo_na = pd.concat([data_wo_na, education], axis=1) data_wo_na = data_wo_na.drop(columns="education") data_wo_na # OUTLIER DETECTION datanum = data_wo_na[ ["cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].copy() datacat = data_wo_na.drop( columns=["cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ).copy() Q1 = datanum.quantile(0.25) Q3 = datanum.quantile(0.75) IQR = Q3 - Q1 print(IQR) datanum = datanum[ ~((datanum < (Q1 - 1.5 * IQR)) | (datanum > (Q3 + 1.5 * IQR))).any(axis=1) ] datanum.shape datanumcat = pd.concat([datacat, datanum], axis=1) datanumcat = datanumcat.dropna() datanumcat[ ["age", "cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].describe() # Descriptive statistics after deleting outliers # MULTICOLLINEARITY CHECK # Correlation plt.figure(figsize=(6, 5), dpi=80) sns.heatmap( datanumcat[["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]].corr(), xticklabels=datanumcat[["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]] .corr() .columns, yticklabels=datanumcat[["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]] .corr() .columns, cmap="viridis", center=0, annot=True, ) # Decorations plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.show() # There is a strong correlation between systolic and diastolic blood pressures. But all other variables have weak correlations between each other. # Import library for VIF from statsmodels.stats.outliers_influence import variance_inflation_factor def calc_vif(X): # Calculating VIF vif = pd.DataFrame() vif["variables"] = X.columns vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])] return vif vif_data = datanumcat[ ["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"] ].copy() # Burada standardized etmeden önceki VIF valueları da göstermek lazım calc_vif(vif_data) # In order to see the multicollinearity between variables, their VIF values were checked. They should be less than 5 or 10. In this analysis, the threshold of VIF values was decided as 10. Since VIF values are much higher in the above table, scaling or another method has to be applied. 
# SCALING
from sklearn.preprocessing import MinMaxScaler

# define min-max scaler
scaler = MinMaxScaler()

# transform the continuous columns; the original index is kept so the scaled columns
# stay aligned with the categorical columns when they are concatenated back together
datanum2 = datanumcat[["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]].copy()
datanum2 = pd.DataFrame(
    scaler.fit_transform(datanum2),
    index=datanum2.index,
    columns=["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"],
)
print(datanum2)
datacat2 = datanumcat.drop(
    columns=["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]
).copy()
data_log = pd.concat([datanum2, datacat2], axis=1)
data_log = data_log.dropna()

# The VIF values from before scaling should also be shown here for comparison
vif_data = data_log[["totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]].copy()
calc_vif(vif_data)

# VIF after excluding diaBP
vif_data = data_log[["totChol", "sysBP", "BMI", "heartRate", "glucose"]].copy()
calc_vif(vif_data)

# After scaling and dropping diaBP, all VIF values are below 10, so there is no longer
# multicollinearity between the remaining variables.
data_log = data_log.drop(columns="diaBP", axis=1)
data_log

data_final = data_log.copy()
data_final.columns.values
X = data_final.loc[:, data_final.columns != "y"]
y = data_final.loc[:, data_final.columns == "y"]

# OVERSAMPLING FOR IMBALANCED DATA
from collections import Counter
from imblearn.over_sampling import RandomOverSampler

ros = RandomOverSampler(sampling_strategy=1, random_state=42)
X, y = ros.fit_resample(X, y)
print("Resampled dataset shape %s" % Counter(y))

# **NAIVE BAYES**
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

model = GaussianNB()
model.fit(X, y)

X = data_log.drop("y", axis=1).copy()
y = data_log["y"].copy()
X, y = ros.fit_resample(X, y)
X = pd.DataFrame(X)
feature_cols = [
    "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age",
    "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes",
    "education_1", "education_2", "education_3", "education_4", "cigsPerDay",
]
X.columns = feature_cols
X[feature_cols] = X[feature_cols].astype("category")
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42, stratify=y
)

# param_grid_nb = {'var_smoothing': np.logspace(0, -9, num=100)}
# grid_search = GridSearchCV(GaussianNB(), param_grid_nb, cv=7)
# grid_search.fit(X_train, y_train)
# grid_search.best_params_
modelnb = GaussianNB(var_smoothing=0.0005336699231206307)
modelnb.fit(X_train, y_train)
y2_modelnb = modelnb.predict(X_test)
accuracy_score(y_test, y2_modelnb)

from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score

cf = confusion_matrix(y_test, y2_modelnb)
print(classification_report(y_test, y2_modelnb))
group_names = ["True Negative", "False Positive", "False Negative", "True Positive"]
group_counts = ["{0:0.0f}".format(value) for value in cf.flatten()]
group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)]
labels = [
    f"{v1}\n{v2}\n{v3}"
    for v1, v2, v3 in
zip(group_names, group_counts, group_percentages) ] labels = np.asarray(labels).reshape(2, 2) sns.heatmap(cf, annot=labels, fmt="", cmap=palette) perf_nb = pd.DataFrame( { "Train_Score": modelnb.score(X_train, y_train), "Test_Score": modelnb.score(X_test, y_test), "Precision_Score": precision_score(y_test, y2_modelnb), "Recall_Score": recall_score(y_test, y2_modelnb), "F1_Score": f1_score(y_test, y2_modelnb), }, index=["Naives Bayes"], ) # **KNN** from sklearn.neighbors import KNeighborsClassifier X = data_log.drop("y", axis=1).copy() y = data_log["y"].copy() X, y = ros.fit_resample(X, y) X = pd.DataFrame(X) X.columns = [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42, stratify=y ) # from sklearn.model_selection import GridSearchCV # gridSearchParameters = {'n_neighbors' : [i for i in range(3,10,2)], # 'weights' : ['uniform', 'distance'], # 'metric' : ['euclidean','manhattan','minkowski','hamming'] # } # grid = GridSearchCV(KNeighborsClassifier(), gridSearchParameters, cv=7) # grid.fit(X_train,y_train) # grid.best_params_ modelknn = KNeighborsClassifier(metric="hamming", n_neighbors=3, weights="distance") modelknn.fit(X_train, y_train) y2_modelknn = modelknn.predict(X_test) accuracy_score(y_test, y2_modelknn) cf = confusion_matrix(y_test, y2_modelknn) print(classification_report(y_test, y2_modelknn)) group_names = ["True Negative", "False Positive", "False Negative", "True Positive"] group_counts = ["{0:0.0f}".format(value) for value in cf.flatten()] group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)] labels = [ f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names, group_counts, group_percentages) ] labels = np.asarray(labels).reshape(2, 2) sns.heatmap(cf, annot=labels, fmt="", cmap=palette) perf_knn = pd.DataFrame( { "Train_Score": modelknn.score(X_train, y_train), "Test_Score": modelknn.score(X_test, y_test), "Precision_Score": precision_score(y_test, y2_modelknn), "Recall_Score": recall_score(y_test, y2_modelknn), "F1_Score": f1_score(y_test, y2_modelknn), }, index=["KNN"], ) # **LOGISTIC REGRESSION** data_log["y"].value_counts() count_no_risk = len(data_log[data_log["y"] == 0]) count_risk = len(data_log[data_log["y"] == 1]) pct_of_no_risk = count_no_risk / (count_no_risk + count_risk) print("percentage of no risk", pct_of_no_risk * 100) pct_of_risk = count_risk / (count_no_risk + count_risk) print("percentage of risk", pct_of_risk * 100) data_log data_final = data_log.copy() data_final.columns.values X = data_final.loc[:, data_final.columns != "y"] y = data_final.loc[:, data_final.columns == "y"] # OVERSAMPLING FOR IMBALANCED DATA from collections import Counter from imblearn.over_sampling import RandomOverSampler ros = RandomOverSampler(sampling_strategy=1, random_state=42) X, y = ros.fit_resample(X, y) print("Resampled dataset shape 
%s" % Counter(y)) X = pd.DataFrame( X, columns=[ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ], ) X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42, stratify=y ) pd.DataFrame(y).value_counts() X_for_logistic = data_final.loc[:, data_final.columns != "y"] y_for_logistic = data_final.loc[:, data_final.columns == "y"] X_for_logistic, y_for_logistic = ros.fit_resample(X_for_logistic, y_for_logistic) X_for_logistic = pd.DataFrame(X_for_logistic) X_for_logistic.columns = [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] X_for_logistic[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X_for_logistic[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train_lg, X_test_lg, y_train_lg, y_test_lg = train_test_split( X_for_logistic, y_for_logistic, test_size=0.25, random_state=42, stratify=y_for_logistic, ) columns = X_train.columns os_data_X = pd.DataFrame(X_train, columns=columns) os_data_y = pd.DataFrame(y_train) os_data_y.columns = ["y"] os_data_y.value_counts() print("length of oversampled data is ", len(os_data_X)) print( "Number of no subscription in oversampled data", len(os_data_y[os_data_y["y"] == 0]) ) print("Number of subscription", len(os_data_y[os_data_y["y"] == 1])) print( "Proportion of no subscription data in oversampled data is ", len(os_data_y[os_data_y["y"] == 0]) / len(os_data_X), ) print( "Proportion of subscription data in oversampled data is ", len(os_data_y[os_data_y["y"] == 1]) / len(os_data_X), ) # from sklearn.model_selection import GridSearchCV # grid= dict(solver = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'], # C= [0.001,0.01,0.1,1,10,100,1000], # penalty= ['none', 'l1', 'l2', 'elasticnet'])# l1 lasso l2 ridge # logreg=LogisticRegression() # logreg_cv=GridSearchCV(logreg,grid,cv=10) # logreg_cv.fit(X_train_lg,y_train_lg) # print("tuned hpyerparameters :(best parameters) ",logreg_cv.best_params_) # print("accuracy :",logreg_cv.best_score_) # By the GridSearchCV method, the best hyperparameters were found and they were used in the logistic model. 
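# A hedged sketch (my addition, not the author's exact run) of the grid search referred to
# above: it tunes C and penalty for LogisticRegression on the oversampled training split.
# X_train_lg / y_train_lg come from the cells above; cv=5, max_iter=1000 and the saga solver
# are assumptions made here so that both l1 and l2 penalties are valid choices.
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression

param_grid = {"C": [0.01, 0.1, 1, 10], "penalty": ["l1", "l2"]}
logreg_cv = GridSearchCV(
    LogisticRegression(solver="saga", max_iter=1000),
    param_grid,
    cv=5,
    scoring="accuracy",
)
logreg_cv.fit(X_train_lg, y_train_lg.values.ravel())
print("best parameters:", logreg_cv.best_params_)
print("best CV accuracy:", logreg_cv.best_score_)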
data_final_vars = data_final.columns.values.tolist() y = ["y"] X = [i for i in data_final_vars if i not in y] from sklearn.feature_selection import RFE from sklearn.linear_model import LogisticRegression logreg = LogisticRegression(C=0.1, penalty="l1", solver="saga") rfe = RFE(logreg, 20) rfe = rfe.fit(os_data_X, os_data_y.values.ravel()) print(rfe.support_) print(rfe.ranking_) X = os_data_X y = os_data_y["y"] import statsmodels.api as sm logit_model = sm.Logit(np.asarray(y), X.astype(float)) result = logit_model.fit() print(result.summary2()) # Since there are variables that have p-values greater than 0.05, there should be an elimination among independent variables. X = os_data_X.drop( columns=[ "BPMeds", "diabetes", "sysBP", "totChol", "currentSmoker", "prevalentStroke", "heartRate", "glucose", ], axis=1, ).copy() y = os_data_y["y"] logit_model = sm.Logit(np.asarray(y), X.astype(float)) result = logit_model.fit() print(result.summary2()) from sklearn.linear_model import LogisticRegression from sklearn import metrics X_train, X_test, y_train, y_test = train_test_split( X_for_logistic, y_for_logistic, test_size=0.25, random_state=42, stratify=y_for_logistic, ) logreg = LogisticRegression(C=0.1, penalty="l1", solver="saga") logreg.fit(X_train, y_train) y_pred = logreg.predict(X_test) from sklearn.metrics import confusion_matrix cf = confusion_matrix(y_test, y_pred) print(cf) from sklearn.metrics import classification_report l_cr = classification_report(y_test, y_pred, output_dict=True) print(classification_report(y_test, y_pred)) perf_lr = pd.DataFrame( { "Train_Score": logreg.score(X_train, y_train), "Test_Score": logreg.score(X_test, y_test), "Precision_Score": precision_score(y_test, y_pred), "Recall_Score": recall_score(y_test, y_pred), "F1_Score": f1_score(y_test, y_pred), }, index=["Logistic Regression"], ) # **DECISION TREE** # Load libraries import pandas as pd from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier from sklearn.model_selection import train_test_split # Import train_test_split function from sklearn import ( metrics, ) # Import scikit-learn metrics module for accuracy calculation X = data_log.drop(columns="y") # Features y = data_log["y"] # Target variable X, y = ros.fit_resample(X, y) X = pd.DataFrame(X) X.columns = [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42, stratify=y ) # param_dict={"criterion" : ['gini', 'entropy'], "max_depth":range(1,10),"min_samples_split":range(1,10),"min_samples_leaf":range(1,5) } # clf_GS = GridSearchCV( DecisionTreeClassifier(),param_grid= param_dict,cv=10,verbose=1,n_jobs=-1) # clf_GS.fit(X_train, y_train) # clf_GS.best_params_ # clf_GS.best_estimator_ # Create Decision Tree classifer object clf = DecisionTreeClassifier( ccp_alpha=0.0, class_weight=None, criterion="gini", max_depth=9, 
max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=3, min_weight_fraction_leaf=0.0, presort="deprecated", random_state=None, splitter="best", ) # Train Decision Tree Classifer clf = clf.fit(X_train, y_train) # Predict the response for test dataset y_pred = clf.predict(X_test) # Model Accuracy, how often is the classifier correct? print("Accuracy:", metrics.accuracy_score(y_test, y_pred)) dt_cr = classification_report(y_test, y_pred, output_dict=True) perf_dt = pd.DataFrame( { "Train_Score": clf.score(X_train, y_train), "Test_Score": clf.score(X_test, y_test), "Precision_Score": precision_score(y_test, y_pred), "Recall_Score": recall_score(y_test, y_pred), "F1_Score": f1_score(y_test, y_pred), }, index=["Decision Tree"], ) cf = confusion_matrix(y_test, y_pred) group_names = ["True Negative", "False Positive", "False Negative", "True Positive"] group_counts = ["{0:0.0f}".format(value) for value in cf.flatten()] group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)] labels = [ f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names, group_counts, group_percentages) ] labels = np.asarray(labels).reshape(2, 2) sns.heatmap(cf, annot=labels, fmt="", cmap=palette) print(classification_report(y_test, y_pred)) # **RANDOM FOREST** X = data_log.drop("y", axis=1) y = data_log["y"].copy() X, y = ros.fit_resample(X, y) X = pd.DataFrame(X) X.columns = [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42, stratify=y ) # from sklearn.model_selection import RandomizedSearchCV # number of trees in random forest # n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)] # number of features at every split # max_features = ['auto', 'sqrt'] # max depth # max_depth = [int(x) for x in np.linspace(100, 500, num = 11)] max_depth.append(None) # create random grid # random_grid = { 'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth } # Random search of parameters # rfc_random = RandomizedSearchCV(estimator = rfc, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state=42, n_jobs = -1) # Fit the model # rfc_random.fit(X_train, y_train) # print results # print(rfc_random.best_params_) from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score rfc = RandomForestClassifier(n_estimators=200, max_features="sqrt", max_depth=None) rfc.fit(X_train, y_train) rfc_predict = rfc.predict(X_test) rfc_cv_score = cross_val_score(rfc, X, y, cv=10, scoring="roc_auc") print("=== Confusion Matrix ===") print(confusion_matrix(y_test, rfc_predict)) print("\n") print("=== Classification Report ===") print(classification_report(y_test, rfc_predict)) print("\n") print("=== All AUC Scores ===") 
print(rfc_cv_score) print("\n") print("=== Mean AUC Score ===") print("Mean AUC Score - Random Forest: ", rfc_cv_score.mean()) sorted_idx = rfc.feature_importances_.argsort() plt.barh(data_log.columns[sorted_idx], rfc.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") # The most important 5 features are respectively age, body mass index, total cholesterol, systolic blood pressure, and glucose. cf = confusion_matrix(y_test, rfc_predict) group_names = ["True Negative", "False Positive", "False Negative", "True Positive"] group_counts = ["{0:0.0f}".format(value) for value in cf.flatten()] group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)] labels = [ f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names, group_counts, group_percentages) ] labels = np.asarray(labels).reshape(2, 2) sns.heatmap(cf, annot=labels, fmt="", cmap=palette) perf_rf = pd.DataFrame( { "Train_Score": rfc.score(X_train, y_train), "Test_Score": rfc.score(X_test, y_test), "Precision_Score": precision_score(y_test, rfc_predict), "Recall_Score": recall_score(y_test, rfc_predict), "F1_Score": f1_score(y_test, rfc_predict), }, index=["Random Forest"], ) # **SUPPORT VECTOR MACHINE** X = data_log.drop("y", axis=1).copy() y = data_log["y"].copy() X, y = ros.fit_resample(X, y) X = pd.DataFrame(X) X.columns = [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ] = X[ [ "totChol", "sysBP", "BMI", "heartRate", "glucose", "male", "age", "currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp", "diabetes", "education_1", "education_2", "education_3", "education_4", "cigsPerDay", ] ].astype( "category" ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42, stratify=y ) from sklearn.svm import SVC # defining parameter range # param_grid = {'C': [0.1, 1, 10, 100, 1000], # 'gamma': [1, 0.1, 0.01, 0.001, 0.0001], # 'kernel': ['rbf']} # grid = GridSearchCV(SVC(), param_grid, refit = True, verbose = 3) # fitting the model for grid search # grid.fit(X_train, y_train) from sklearn.svm import SVC svclassifier = SVC(kernel="rbf", C=100, gamma=1) svclassifier.fit(X_train, y_train) y_pred = svclassifier.predict(X_test) from sklearn.metrics import classification_report, confusion_matrix cf = confusion_matrix(y_test, y_pred) print(classification_report(y_test, y_pred)) perf_svm = pd.DataFrame( { "Train_Score": svclassifier.score(X_train, y_train), "Test_Score": svclassifier.score(X_test, y_test), "Precision_Score": precision_score(y_test, y_pred), "Recall_Score": recall_score(y_test, y_pred), "F1_Score": f1_score(y_test, y_pred), }, index=["SVM"], ) group_names = ["True Negative", "False Positive", "False Negative", "True Positive"] group_counts = ["{0:0.0f}".format(value) for value in cf.flatten()] group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)] labels = [ f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names, group_counts, group_percentages) ] labels = np.asarray(labels).reshape(2, 2) sns.heatmap(cf, annot=labels, fmt="", cmap=palette) A = pd.concat([perf_nb, perf_lr, perf_dt, perf_rf, perf_knn, perf_svm]) A
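# A small hedged follow-up (my addition, not part of the original notebook): rank the models
# collected in A by F1 score so the comparison table at the end is easier to read.
print(A.sort_values("F1_Score", ascending=False).round(3))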
[{"heart-disease-prediction-using-logistic-regression/framingham.csv": {"column_names": "[\"male\", \"age\", \"education\", \"currentSmoker\", \"cigsPerDay\", \"BPMeds\", \"prevalentStroke\", \"prevalentHyp\", \"diabetes\", \"totChol\", \"sysBP\", \"diaBP\", \"BMI\", \"heartRate\", \"glucose\", \"TenYearCHD\"]", "column_data_types": "{\"male\": \"int64\", \"age\": \"int64\", \"education\": \"float64\", \"currentSmoker\": \"int64\", \"cigsPerDay\": \"float64\", \"BPMeds\": \"float64\", \"prevalentStroke\": \"int64\", \"prevalentHyp\": \"int64\", \"diabetes\": \"int64\", \"totChol\": \"float64\", \"sysBP\": \"float64\", \"diaBP\": \"float64\", \"BMI\": \"float64\", \"heartRate\": \"float64\", \"glucose\": \"float64\", \"TenYearCHD\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4238 entries, 0 to 4237\nData columns (total 16 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 male 4238 non-null int64 \n 1 age 4238 non-null int64 \n 2 education 4133 non-null float64\n 3 currentSmoker 4238 non-null int64 \n 4 cigsPerDay 4209 non-null float64\n 5 BPMeds 4185 non-null float64\n 6 prevalentStroke 4238 non-null int64 \n 7 prevalentHyp 4238 non-null int64 \n 8 diabetes 4238 non-null int64 \n 9 totChol 4188 non-null float64\n 10 sysBP 4238 non-null float64\n 11 diaBP 4238 non-null float64\n 12 BMI 4219 non-null float64\n 13 heartRate 4237 non-null float64\n 14 glucose 3850 non-null float64\n 15 TenYearCHD 4238 non-null int64 \ndtypes: float64(9), int64(7)\nmemory usage: 529.9 KB\n", "summary": "{\"male\": {\"count\": 4238.0, \"mean\": 0.42921189240207647, \"std\": 0.4950220855364925, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"age\": {\"count\": 4238.0, \"mean\": 49.58494572911751, \"std\": 8.572159925118484, \"min\": 32.0, \"25%\": 42.0, \"50%\": 49.0, \"75%\": 56.0, \"max\": 70.0}, \"education\": {\"count\": 4133.0, \"mean\": 1.9789499153157513, \"std\": 1.019790689312203, \"min\": 1.0, \"25%\": 1.0, \"50%\": 2.0, \"75%\": 3.0, \"max\": 4.0}, \"currentSmoker\": {\"count\": 4238.0, \"mean\": 0.49410099103350635, \"std\": 0.5000241969070368, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"cigsPerDay\": {\"count\": 4209.0, \"mean\": 9.003088619624615, \"std\": 11.92009358782776, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 20.0, \"max\": 70.0}, \"BPMeds\": {\"count\": 4185.0, \"mean\": 0.02962962962962963, \"std\": 0.16958356790200033, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"prevalentStroke\": {\"count\": 4238.0, \"mean\": 0.005899008966493629, \"std\": 0.07658717064585817, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"prevalentHyp\": {\"count\": 4238.0, \"mean\": 0.31052383199622463, \"std\": 0.4627626956737016, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"diabetes\": {\"count\": 4238.0, \"mean\": 0.025719679093912224, \"std\": 0.15831642786899552, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"totChol\": {\"count\": 4188.0, \"mean\": 236.72158548233045, \"std\": 44.590334318749456, \"min\": 107.0, \"25%\": 206.0, \"50%\": 234.0, \"75%\": 263.0, \"max\": 696.0}, \"sysBP\": {\"count\": 4238.0, \"mean\": 132.35240679565834, \"std\": 22.038096643544584, \"min\": 83.5, \"25%\": 117.0, \"50%\": 128.0, \"75%\": 144.0, \"max\": 295.0}, \"diaBP\": {\"count\": 4238.0, \"mean\": 82.89346389806512, \"std\": 11.9108496002255, \"min\": 48.0, \"25%\": 75.0, \"50%\": 82.0, \"75%\": 
89.875, \"max\": 142.5}, \"BMI\": {\"count\": 4219.0, \"mean\": 25.80200758473572, \"std\": 4.080111062409028, \"min\": 15.54, \"25%\": 23.07, \"50%\": 25.4, \"75%\": 28.04, \"max\": 56.8}, \"heartRate\": {\"count\": 4237.0, \"mean\": 75.87892376681614, \"std\": 12.0265963516126, \"min\": 44.0, \"25%\": 68.0, \"50%\": 75.0, \"75%\": 83.0, \"max\": 143.0}, \"glucose\": {\"count\": 3850.0, \"mean\": 81.96675324675324, \"std\": 23.959998189237478, \"min\": 40.0, \"25%\": 71.0, \"50%\": 78.0, \"75%\": 87.0, \"max\": 394.0}, \"TenYearCHD\": {\"count\": 4238.0, \"mean\": 0.1519584709768759, \"std\": 0.35902299199027266, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}}", "examples": "{\"male\":{\"0\":1,\"1\":0,\"2\":1,\"3\":0},\"age\":{\"0\":39,\"1\":46,\"2\":48,\"3\":61},\"education\":{\"0\":4.0,\"1\":2.0,\"2\":1.0,\"3\":3.0},\"currentSmoker\":{\"0\":0,\"1\":0,\"2\":1,\"3\":1},\"cigsPerDay\":{\"0\":0.0,\"1\":0.0,\"2\":20.0,\"3\":30.0},\"BPMeds\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"prevalentStroke\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"prevalentHyp\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"diabetes\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"totChol\":{\"0\":195.0,\"1\":250.0,\"2\":245.0,\"3\":225.0},\"sysBP\":{\"0\":106.0,\"1\":121.0,\"2\":127.5,\"3\":150.0},\"diaBP\":{\"0\":70.0,\"1\":81.0,\"2\":80.0,\"3\":95.0},\"BMI\":{\"0\":26.97,\"1\":28.73,\"2\":25.34,\"3\":28.58},\"heartRate\":{\"0\":80.0,\"1\":95.0,\"2\":75.0,\"3\":65.0},\"glucose\":{\"0\":77.0,\"1\":76.0,\"2\":70.0,\"3\":103.0},\"TenYearCHD\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1}}"}}]
true
1
<start_data_description><data_path>heart-disease-prediction-using-logistic-regression/framingham.csv: <column_names> ['male', 'age', 'education', 'currentSmoker', 'cigsPerDay', 'BPMeds', 'prevalentStroke', 'prevalentHyp', 'diabetes', 'totChol', 'sysBP', 'diaBP', 'BMI', 'heartRate', 'glucose', 'TenYearCHD'] <column_types> {'male': 'int64', 'age': 'int64', 'education': 'float64', 'currentSmoker': 'int64', 'cigsPerDay': 'float64', 'BPMeds': 'float64', 'prevalentStroke': 'int64', 'prevalentHyp': 'int64', 'diabetes': 'int64', 'totChol': 'float64', 'sysBP': 'float64', 'diaBP': 'float64', 'BMI': 'float64', 'heartRate': 'float64', 'glucose': 'float64', 'TenYearCHD': 'int64'} <dataframe_Summary> {'male': {'count': 4238.0, 'mean': 0.42921189240207647, 'std': 0.4950220855364925, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'age': {'count': 4238.0, 'mean': 49.58494572911751, 'std': 8.572159925118484, 'min': 32.0, '25%': 42.0, '50%': 49.0, '75%': 56.0, 'max': 70.0}, 'education': {'count': 4133.0, 'mean': 1.9789499153157513, 'std': 1.019790689312203, 'min': 1.0, '25%': 1.0, '50%': 2.0, '75%': 3.0, 'max': 4.0}, 'currentSmoker': {'count': 4238.0, 'mean': 0.49410099103350635, 'std': 0.5000241969070368, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'cigsPerDay': {'count': 4209.0, 'mean': 9.003088619624615, 'std': 11.92009358782776, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 20.0, 'max': 70.0}, 'BPMeds': {'count': 4185.0, 'mean': 0.02962962962962963, 'std': 0.16958356790200033, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'prevalentStroke': {'count': 4238.0, 'mean': 0.005899008966493629, 'std': 0.07658717064585817, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'prevalentHyp': {'count': 4238.0, 'mean': 0.31052383199622463, 'std': 0.4627626956737016, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'diabetes': {'count': 4238.0, 'mean': 0.025719679093912224, 'std': 0.15831642786899552, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'totChol': {'count': 4188.0, 'mean': 236.72158548233045, 'std': 44.590334318749456, 'min': 107.0, '25%': 206.0, '50%': 234.0, '75%': 263.0, 'max': 696.0}, 'sysBP': {'count': 4238.0, 'mean': 132.35240679565834, 'std': 22.038096643544584, 'min': 83.5, '25%': 117.0, '50%': 128.0, '75%': 144.0, 'max': 295.0}, 'diaBP': {'count': 4238.0, 'mean': 82.89346389806512, 'std': 11.9108496002255, 'min': 48.0, '25%': 75.0, '50%': 82.0, '75%': 89.875, 'max': 142.5}, 'BMI': {'count': 4219.0, 'mean': 25.80200758473572, 'std': 4.080111062409028, 'min': 15.54, '25%': 23.07, '50%': 25.4, '75%': 28.04, 'max': 56.8}, 'heartRate': {'count': 4237.0, 'mean': 75.87892376681614, 'std': 12.0265963516126, 'min': 44.0, '25%': 68.0, '50%': 75.0, '75%': 83.0, 'max': 143.0}, 'glucose': {'count': 3850.0, 'mean': 81.96675324675324, 'std': 23.959998189237478, 'min': 40.0, '25%': 71.0, '50%': 78.0, '75%': 87.0, 'max': 394.0}, 'TenYearCHD': {'count': 4238.0, 'mean': 0.1519584709768759, 'std': 0.35902299199027266, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}} <dataframe_info> RangeIndex: 4238 entries, 0 to 4237 Data columns (total 16 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 male 4238 non-null int64 1 age 4238 non-null int64 2 education 4133 non-null float64 3 currentSmoker 4238 non-null int64 4 cigsPerDay 4209 non-null float64 5 BPMeds 4185 non-null float64 6 prevalentStroke 4238 non-null int64 7 prevalentHyp 4238 non-null int64 8 diabetes 4238 non-null int64 9 totChol 4188 non-null 
float64 10 sysBP 4238 non-null float64 11 diaBP 4238 non-null float64 12 BMI 4219 non-null float64 13 heartRate 4237 non-null float64 14 glucose 3850 non-null float64 15 TenYearCHD 4238 non-null int64 dtypes: float64(9), int64(7) memory usage: 529.9 KB <some_examples> {'male': {'0': 1, '1': 0, '2': 1, '3': 0}, 'age': {'0': 39, '1': 46, '2': 48, '3': 61}, 'education': {'0': 4.0, '1': 2.0, '2': 1.0, '3': 3.0}, 'currentSmoker': {'0': 0, '1': 0, '2': 1, '3': 1}, 'cigsPerDay': {'0': 0.0, '1': 0.0, '2': 20.0, '3': 30.0}, 'BPMeds': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'prevalentStroke': {'0': 0, '1': 0, '2': 0, '3': 0}, 'prevalentHyp': {'0': 0, '1': 0, '2': 0, '3': 1}, 'diabetes': {'0': 0, '1': 0, '2': 0, '3': 0}, 'totChol': {'0': 195.0, '1': 250.0, '2': 245.0, '3': 225.0}, 'sysBP': {'0': 106.0, '1': 121.0, '2': 127.5, '3': 150.0}, 'diaBP': {'0': 70.0, '1': 81.0, '2': 80.0, '3': 95.0}, 'BMI': {'0': 26.97, '1': 28.73, '2': 25.34, '3': 28.58}, 'heartRate': {'0': 80.0, '1': 95.0, '2': 75.0, '3': 65.0}, 'glucose': {'0': 77.0, '1': 76.0, '2': 70.0, '3': 103.0}, 'TenYearCHD': {'0': 0, '1': 0, '2': 0, '3': 1}} <end_description>
19,598
0
22,413
19,598
69696176
<jupyter_start><jupyter_text>Real estate price prediction Kaggle dataset identifier: real-estate-price-prediction <jupyter_code>import pandas as pd df = pd.read_csv('real-estate-price-prediction/Real estate.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 414 entries, 0 to 413 Data columns (total 8 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 No 414 non-null int64 1 X1 transaction date 414 non-null float64 2 X2 house age 414 non-null float64 3 X3 distance to the nearest MRT station 414 non-null float64 4 X4 number of convenience stores 414 non-null int64 5 X5 latitude 414 non-null float64 6 X6 longitude 414 non-null float64 7 Y house price of unit area 414 non-null float64 dtypes: float64(6), int64(2) memory usage: 26.0 KB <jupyter_text>Examples: { "No": 1.0, "X1 transaction date": 2012.917, "X2 house age": 32.0, "X3 distance to the nearest MRT station": 84.87882, "X4 number of convenience stores": 10.0, "X5 latitude": 24.98298, "X6 longitude": 121.54024, "Y house price of unit area": 37.9 } { "No": 2.0, "X1 transaction date": 2012.917, "X2 house age": 19.5, "X3 distance to the nearest MRT station": 306.5947, "X4 number of convenience stores": 9.0, "X5 latitude": 24.98034, "X6 longitude": 121.53951, "Y house price of unit area": 42.2 } { "No": 3.0, "X1 transaction date": 2013.583, "X2 house age": 13.3, "X3 distance to the nearest MRT station": 561.9845, "X4 number of convenience stores": 5.0, "X5 latitude": 24.98746, "X6 longitude": 121.54391, "Y house price of unit area": 47.3 } { "No": 4.0, "X1 transaction date": 2013.5, "X2 house age": 13.3, "X3 distance to the nearest MRT station": 561.9845, "X4 number of convenience stores": 5.0, "X5 latitude": 24.98746, "X6 longitude": 121.54391, "Y house price of unit area": 54.8 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.preprocessing import LabelEncoder from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier from sklearn import ( metrics, ) # Import scikit-learn metrics module for accuracy calculation # # Regression Dataset # Obtaining the dataset: train_reg = pd.read_csv("../input/real-estate-price-prediction/Real estate.csv") train_reg # Checking for data that needs cleaning: train_reg.info() X = train_reg.drop("Y house price of unit area", axis=1) y = train_reg["Y house price of unit area"] # Splitting the dataset: X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) # Scaling dataset to be used with linear regression and SVR: from sklearn.preprocessing import StandardScaler scaler = StandardScaler() Sc_X_train = scaler.fit_transform(X_train) Sc_X_test = scaler.transform(X_test) # transform only: reuse the training-set statistics; refitting the scaler on the test set would leak information lin_reg = LinearRegression() # the features are already standardized above, so no extra normalization is needed lin_reg.fit(Sc_X_train, y_train) score_train = lin_reg.score(Sc_X_train, y_train) # Coefficient of determination score_test = lin_reg.score(Sc_X_test, y_test) print("train score: {}".format(score_train)) print("test score: {}".format(score_test)) lin_reg.intercept_ print(lin_reg.coef_) y_pred = lin_reg.predict(Sc_X_test) df = pd.DataFrame({"Actual": y_test, "Predicted": y_pred}) from sklearn import metrics print("Mean Absolute Error:", metrics.mean_absolute_error(y_test, y_pred)) print("Mean Squared Error:", metrics.mean_squared_error(y_test, y_pred)) print("Root Mean Squared Error:", np.sqrt(metrics.mean_squared_error(y_test, y_pred))) from sklearn.svm import SVR reg = SVR(kernel="poly") reg.fit(X_train, y_train) svr_score_train = reg.score(X_train, y_train) # Coefficient of determination svr_score_test = reg.score(X_test, y_test) print("train score: {}".format(svr_score_train)) print("test score: {}".format(svr_score_test)) # # Classification dataset trainC = pd.read_csv("../input/breast-cancer-wisconsin-data/data.csv") trainC.info() X = trainC.drop(["diagnosis", "Unnamed: 32"], axis=1) y = trainC["diagnosis"] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) # Create Decision Tree classifier object clf = DecisionTreeClassifier(max_depth=2) # Train Decision Tree Classifier clf = clf.fit(X_train, y_train) # Predict the response for test dataset y_pred = clf.predict(X_test) print("Accuracy:", metrics.accuracy_score(y_test, y_pred)) # Import Random Forest Model from sklearn.ensemble import RandomForestClassifier # Create a Random Forest classifier clf = RandomForestClassifier(n_estimators=100) # Train the model using the training sets, then predict the response for the test set clf.fit(X_train, y_train) y_pred = clf.predict(X_test) print("Accuracy:", metrics.accuracy_score(y_test, y_pred)) cleanup_nums = {"diagnosis": {"M": 0, "B": 1}} # the diagnosis labels in this dataset are M (malignant) and B (benign), not D trainC.replace(cleanup_nums, inplace=True) from sklearn.preprocessing import StandardScaler scaler = StandardScaler() Sc_X_train = scaler.fit_transform(X_train) Sc_X_test = scaler.transform(X_test) # transform only; the scaler stays fitted on the training data # Import svm model from sklearn import svm # Create a svm Classifier clf = svm.SVC(kernel="poly") # Polynomial kernel # Train the model using the training sets (note: the SVC is fit on the unscaled features, not on the scaled copies above) clf.fit(X_train, y_train) # Predict the response for test dataset y_pred = clf.predict(X_test) print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
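# My addition (a hedged sketch, not part of the original notebook): wrapping the scaler and the
# SVC in a Pipeline keeps the scaler fitted on the training data only, which avoids the
# fit_transform-on-test pitfall noted above. It reuses the X_train/X_test/y_train/y_test split
# from the classification cells above; kernel="poly" simply mirrors the author's choice.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn import metrics

svc_pipe = make_pipeline(StandardScaler(), SVC(kernel="poly"))
svc_pipe.fit(X_train, y_train)
print("Accuracy:", metrics.accuracy_score(y_test, svc_pipe.predict(X_test)))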
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/696/69696176.ipynb
real-estate-price-prediction
quantbruce
[{"Id": 69696176, "ScriptId": 18957160, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4705721, "CreationDate": "08/02/2021 20:21:50", "VersionNumber": 2.0, "Title": "Machine Learning 1", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 140.0, "LinesInsertedFromPrevious": 114.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 26.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 93169503, "KernelVersionId": 69696176, "SourceDatasetVersionId": 204267}, {"Id": 93169502, "KernelVersionId": 69696176, "SourceDatasetVersionId": 408}]
[{"Id": 204267, "DatasetId": 88705, "DatasourceVersionId": 215597, "CreatorUserId": 2535960, "LicenseName": "Other (specified in description)", "CreationDate": "12/08/2018 09:13:48", "VersionNumber": 1.0, "Title": "Real estate price prediction", "Slug": "real-estate-price-prediction", "Subtitle": "regression analysis, mutiple regression,linear regression, prediction", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 21968.0, "TotalUncompressedBytes": 21968.0}]
[{"Id": 88705, "CreatorUserId": 2535960, "OwnerUserId": 2535960.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 204267.0, "CurrentDatasourceVersionId": 215597.0, "ForumId": 98263, "Type": 2, "CreationDate": "12/08/2018 09:13:48", "LastActivityDate": "12/08/2018", "TotalViews": 322550, "TotalDownloads": 51026, "TotalVotes": 369, "TotalKernels": 194}]
[{"Id": 2535960, "UserName": "quantbruce", "DisplayName": "Algor_Bruce", "RegisterDate": "11/25/2018", "PerformanceTier": 1}]
[{"real-estate-price-prediction/Real estate.csv": {"column_names": "[\"No\", \"X1 transaction date\", \"X2 house age\", \"X3 distance to the nearest MRT station\", \"X4 number of convenience stores\", \"X5 latitude\", \"X6 longitude\", \"Y house price of unit area\"]", "column_data_types": "{\"No\": \"int64\", \"X1 transaction date\": \"float64\", \"X2 house age\": \"float64\", \"X3 distance to the nearest MRT station\": \"float64\", \"X4 number of convenience stores\": \"int64\", \"X5 latitude\": \"float64\", \"X6 longitude\": \"float64\", \"Y house price of unit area\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 414 entries, 0 to 413\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 No 414 non-null int64 \n 1 X1 transaction date 414 non-null float64\n 2 X2 house age 414 non-null float64\n 3 X3 distance to the nearest MRT station 414 non-null float64\n 4 X4 number of convenience stores 414 non-null int64 \n 5 X5 latitude 414 non-null float64\n 6 X6 longitude 414 non-null float64\n 7 Y house price of unit area 414 non-null float64\ndtypes: float64(6), int64(2)\nmemory usage: 26.0 KB\n", "summary": "{\"No\": {\"count\": 414.0, \"mean\": 207.5, \"std\": 119.6557562342907, \"min\": 1.0, \"25%\": 104.25, \"50%\": 207.5, \"75%\": 310.75, \"max\": 414.0}, \"X1 transaction date\": {\"count\": 414.0, \"mean\": 2013.1489710144926, \"std\": 0.2819672402630115, \"min\": 2012.667, \"25%\": 2012.917, \"50%\": 2013.167, \"75%\": 2013.417, \"max\": 2013.583}, \"X2 house age\": {\"count\": 414.0, \"mean\": 17.71256038647343, \"std\": 11.39248453324253, \"min\": 0.0, \"25%\": 9.025, \"50%\": 16.1, \"75%\": 28.15, \"max\": 43.8}, \"X3 distance to the nearest MRT station\": {\"count\": 414.0, \"mean\": 1083.8856889130436, \"std\": 1262.1095954078512, \"min\": 23.38284, \"25%\": 289.3248, \"50%\": 492.2313, \"75%\": 1454.279, \"max\": 6488.021}, \"X4 number of convenience stores\": {\"count\": 414.0, \"mean\": 4.094202898550725, \"std\": 2.945561805663618, \"min\": 0.0, \"25%\": 1.0, \"50%\": 4.0, \"75%\": 6.0, \"max\": 10.0}, \"X5 latitude\": {\"count\": 414.0, \"mean\": 24.969030072463767, \"std\": 0.012410196590450338, \"min\": 24.93207, \"25%\": 24.963, \"50%\": 24.9711, \"75%\": 24.977455, \"max\": 25.01459}, \"X6 longitude\": {\"count\": 414.0, \"mean\": 121.53336108695655, \"std\": 0.015347183004592205, \"min\": 121.47353, \"25%\": 121.528085, \"50%\": 121.53863, \"75%\": 121.543305, \"max\": 121.56627}, \"Y house price of unit area\": {\"count\": 414.0, \"mean\": 37.980193236714975, \"std\": 13.606487697735314, \"min\": 7.6, \"25%\": 27.7, \"50%\": 38.45, \"75%\": 46.6, \"max\": 117.5}}", "examples": "{\"No\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"X1 transaction date\":{\"0\":2012.917,\"1\":2012.917,\"2\":2013.583,\"3\":2013.5},\"X2 house age\":{\"0\":32.0,\"1\":19.5,\"2\":13.3,\"3\":13.3},\"X3 distance to the nearest MRT station\":{\"0\":84.87882,\"1\":306.5947,\"2\":561.9845,\"3\":561.9845},\"X4 number of convenience stores\":{\"0\":10,\"1\":9,\"2\":5,\"3\":5},\"X5 latitude\":{\"0\":24.98298,\"1\":24.98034,\"2\":24.98746,\"3\":24.98746},\"X6 longitude\":{\"0\":121.54024,\"1\":121.53951,\"2\":121.54391,\"3\":121.54391},\"Y house price of unit area\":{\"0\":37.9,\"1\":42.2,\"2\":47.3,\"3\":54.8}}"}}]
true
2
<start_data_description><data_path>real-estate-price-prediction/Real estate.csv: <column_names> ['No', 'X1 transaction date', 'X2 house age', 'X3 distance to the nearest MRT station', 'X4 number of convenience stores', 'X5 latitude', 'X6 longitude', 'Y house price of unit area'] <column_types> {'No': 'int64', 'X1 transaction date': 'float64', 'X2 house age': 'float64', 'X3 distance to the nearest MRT station': 'float64', 'X4 number of convenience stores': 'int64', 'X5 latitude': 'float64', 'X6 longitude': 'float64', 'Y house price of unit area': 'float64'} <dataframe_Summary> {'No': {'count': 414.0, 'mean': 207.5, 'std': 119.6557562342907, 'min': 1.0, '25%': 104.25, '50%': 207.5, '75%': 310.75, 'max': 414.0}, 'X1 transaction date': {'count': 414.0, 'mean': 2013.1489710144926, 'std': 0.2819672402630115, 'min': 2012.667, '25%': 2012.917, '50%': 2013.167, '75%': 2013.417, 'max': 2013.583}, 'X2 house age': {'count': 414.0, 'mean': 17.71256038647343, 'std': 11.39248453324253, 'min': 0.0, '25%': 9.025, '50%': 16.1, '75%': 28.15, 'max': 43.8}, 'X3 distance to the nearest MRT station': {'count': 414.0, 'mean': 1083.8856889130436, 'std': 1262.1095954078512, 'min': 23.38284, '25%': 289.3248, '50%': 492.2313, '75%': 1454.279, 'max': 6488.021}, 'X4 number of convenience stores': {'count': 414.0, 'mean': 4.094202898550725, 'std': 2.945561805663618, 'min': 0.0, '25%': 1.0, '50%': 4.0, '75%': 6.0, 'max': 10.0}, 'X5 latitude': {'count': 414.0, 'mean': 24.969030072463767, 'std': 0.012410196590450338, 'min': 24.93207, '25%': 24.963, '50%': 24.9711, '75%': 24.977455, 'max': 25.01459}, 'X6 longitude': {'count': 414.0, 'mean': 121.53336108695655, 'std': 0.015347183004592205, 'min': 121.47353, '25%': 121.528085, '50%': 121.53863, '75%': 121.543305, 'max': 121.56627}, 'Y house price of unit area': {'count': 414.0, 'mean': 37.980193236714975, 'std': 13.606487697735314, 'min': 7.6, '25%': 27.7, '50%': 38.45, '75%': 46.6, 'max': 117.5}} <dataframe_info> RangeIndex: 414 entries, 0 to 413 Data columns (total 8 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 No 414 non-null int64 1 X1 transaction date 414 non-null float64 2 X2 house age 414 non-null float64 3 X3 distance to the nearest MRT station 414 non-null float64 4 X4 number of convenience stores 414 non-null int64 5 X5 latitude 414 non-null float64 6 X6 longitude 414 non-null float64 7 Y house price of unit area 414 non-null float64 dtypes: float64(6), int64(2) memory usage: 26.0 KB <some_examples> {'No': {'0': 1, '1': 2, '2': 3, '3': 4}, 'X1 transaction date': {'0': 2012.917, '1': 2012.917, '2': 2013.583, '3': 2013.5}, 'X2 house age': {'0': 32.0, '1': 19.5, '2': 13.3, '3': 13.3}, 'X3 distance to the nearest MRT station': {'0': 84.87882, '1': 306.5947, '2': 561.9845, '3': 561.9845}, 'X4 number of convenience stores': {'0': 10, '1': 9, '2': 5, '3': 5}, 'X5 latitude': {'0': 24.98298, '1': 24.98034, '2': 24.98746, '3': 24.98746}, 'X6 longitude': {'0': 121.54024, '1': 121.53951, '2': 121.54391, '3': 121.54391}, 'Y house price of unit area': {'0': 37.9, '1': 42.2, '2': 47.3, '3': 54.8}} <end_description>
1,294
0
2,097
1,294
69696983
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # #### import libraries, and load data import matplotlib.pyplot as plt df = pd.read_csv("/kaggle/input/titanic/train.csv", index_col="PassengerId") df df.shape df.info() df.isna().sum() y = df["Survived"] y x = df.drop("Survived", axis=1) x.shape x.drop("Name", axis=1, inplace=True) x len(x.Ticket.unique()) x.drop("Ticket", axis=1, inplace=True) x len(x.Fare.unique()) x.Age.isna().sum() x.Age.fillna(0, inplace=True) x.Age.isna().sum() x.Sex = pd.get_dummies(x.Sex, drop_first=True) # encode Sex as a single 0/1 indicator (male) x.Sex x f = x.Embarked.dropna().mode()[0] # most common port of embarkation # x.Embarked.fillna(f, inplace=True) # uncomment to fill the two missing Embarked values with the mode x.isna().sum() x.Embarked.isna().index x.Cabin.fillna("N", inplace=True) x.Cabin x.Cabin = x.Cabin.str[0] # keep only the deck letter x.Cabin x.Age = x.Age.astype(np.int16) x.Parch.value_counts() x.isna().sum()
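# My addition (a hedged sketch, not part of the original notebook): one possible way to finish
# the preprocessing above and get a quick baseline score. One-hot encoding Embarked/Cabin and
# the RandomForest settings below are assumptions, not the author's choices.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

x_model = pd.get_dummies(x, columns=["Embarked", "Cabin"], drop_first=True)
baseline = RandomForestClassifier(n_estimators=200, random_state=42)
print("5-fold CV accuracy:", cross_val_score(baseline, x_model, y, cv=5).mean())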
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/696/69696983.ipynb
null
null
[{"Id": 69696983, "ScriptId": 19041444, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6366037, "CreationDate": "08/02/2021 20:26:35", "VersionNumber": 1.0, "Title": "Titanic Salama4ai", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 81.0, "LinesInsertedFromPrevious": 81.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
519
0
519
519
69696809
# # Car Auctions # ## Introduction # In this notebook I will build some machine learning models with the purpose of predicting whether a specific car bought at auction is a good or a bad purchase. # Supervised machine learning techniques will be used, since the models are built from labeled data. # ## About Dataset # The data comes from Kaggle's "Don't Get Kicked!" competition: the task is to predict whether a used car purchased at an auto auction is a "kick" (a bad buy with hidden problems), using vehicle and purchase attributes; the target column is IsBadBuy. # Link Dataset: https://www.kaggle.com/c/DontGetKicked # ## Imports and Dataset Download import opendatasets as od import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np import matplotlib import os pd.set_option("display.max_columns", None) pd.set_option("display.max_rows", 150) sns.set_style("darkgrid") matplotlib.rcParams["font.size"] = 14 matplotlib.rcParams["figure.figsize"] = (10, 6) matplotlib.rcParams["figure.facecolor"] = "#00000000" od.download("https://www.kaggle.com/c/DontGetKicked") os.listdir("DontGetKicked") # ## Training Set and Test Set train_df = pd.read_csv("DontGetKicked/training.csv") train_df.head() test_df = pd.read_csv("DontGetKicked/test.csv") test_df.head() # ## Inputs and Output train_df.columns inputs = [ "Auction", "VehYear", "VehicleAge", "Make", "Model", "Trim", "SubModel", "Color", "Transmission", "WheelType", "VehOdo", "Nationality", "Size", "TopThreeAmericanName", "MMRAcquisitionAuctionAveragePrice", "MMRAcquisitionAuctionCleanPrice", "MMRAcquisitionRetailAveragePrice", "MMRAcquisitonRetailCleanPrice", "MMRCurrentAuctionAveragePrice", "MMRCurrentAuctionCleanPrice", "MMRCurrentRetailAveragePrice", "MMRCurrentRetailCleanPrice", "PRIMEUNIT", "AUCGUART", "VehBCost", "WarrantyCost", ] output = "IsBadBuy" inputs_df = train_df[inputs].copy() output_df = train_df[output].copy() # ## Data Preprocessing # ### Null Data inputs_df.info() # Since only a small proportion of the values in the "PRIMEUNIT" and "AUCGUART" columns is non-null, we will not use them in our models.
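# A quick check behind the statement above (my addition, a hedged sketch): the share of missing
# values in the two columns that are about to be dropped.
print(inputs_df[["PRIMEUNIT", "AUCGUART"]].isna().mean())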
inputs_df = inputs_df.drop(columns=["PRIMEUNIT", "AUCGUART"], axis=1) inputs_df.info() # ### Imputing missing numeric values numeric_cols = inputs_df.select_dtypes(include=["int64", "float64"]).columns.tolist() inputs_df[numeric_cols].isna().sum().sort_values(ascending=False) # from sklearn.impute import SimpleImputer imputerNum = SimpleImputer(strategy="mean").fit(inputs_df[numeric_cols]) inputs_df[numeric_cols] = imputerNum.transform(inputs_df[numeric_cols]) inputs_df[numeric_cols].isna().sum() inputs_df[numeric_cols].info() # ### Scaling Numeric Features inputs_df[numeric_cols].describe().loc[["min", "max"]] from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler().fit(inputs_df[numeric_cols]) inputs_df[numeric_cols] = scaler.transform(inputs_df[numeric_cols]) inputs_df[numeric_cols].describe().loc[["min", "max"]] # ## Imputing missing categorical data categorical_cols = inputs_df.select_dtypes(include=["object"]).columns.tolist() imputerCat = SimpleImputer(strategy="constant", fill_value="missing") imputerCat.fit(inputs_df[categorical_cols]) inputs_df[categorical_cols] = imputerCat.transform(inputs_df[categorical_cols]) # ### Encoding Categorical Data inputs_df[categorical_cols].nunique().sort_values(ascending=False) from sklearn.preprocessing import OneHotEncoder encoder = OneHotEncoder(sparse=False, handle_unknown="ignore").fit( inputs_df[categorical_cols] ) encoded_cols = list(encoder.get_feature_names(categorical_cols)) len(encoded_cols) encoded_df = pd.DataFrame( encoder.transform(inputs_df[categorical_cols]), columns=encoded_cols ) inputs_df = pd.concat((inputs_df, encoded_df), axis=1) inputs_df.head() # ## Validation Set from sklearn.model_selection import train_test_split train_inputs, val_inputs, train_targets, val_targets = train_test_split( inputs_df[numeric_cols + encoded_cols], output_df, test_size=0.25, random_state=20 ) # ## Model 1: Logistic Regression from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score logisticModel = LogisticRegression(solver="liblinear") logisticModel.fit(train_inputs, train_targets) # #### Accuracy train_preds = logisticModel.predict(train_inputs) val_preds = logisticModel.predict(val_inputs) accuracy_score(train_preds, train_targets) accuracy_score(val_preds, val_targets) # Through a logistic regression model we have reached a strong percentage of 89,6% of accuracy (validation set), therefore, it is really a confident model. 
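# My addition (a hedged sketch, not part of the original notebook): IsBadBuy is an imbalanced
# target (bad buys are the minority class), so accuracy alone can look strong even when few bad
# buys are actually caught. A confusion matrix and ROC AUC on the validation split give a fuller
# picture of the logistic regression model evaluated above.
from sklearn.metrics import confusion_matrix, roc_auc_score

print(confusion_matrix(val_targets, val_preds))
print("ROC AUC:", roc_auc_score(val_targets, logisticModel.predict_proba(val_inputs)[:, 1]))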
# #### Saving the model import joblib logistic_model = { "model": logisticModel, "imputerNum": imputerNum, "imputerCat": imputerCat, "scaler": scaler, "encoder": encoder, "input_cols": train_inputs, "target_col": train_targets, "numeric_cols": numeric_cols, "categorical_cols": categorical_cols, "encoded_cols": encoded_cols, } joblib.dump(logistic_model, "logModel_carAuctions.joblib") # ## Model 2: Decision Tree from sklearn.tree import DecisionTreeClassifier, plot_tree decisionTreeModel = DecisionTreeClassifier(random_state=20) decisionTreeModel.fit(train_inputs, train_targets) plt.figure(figsize=(80, 20)) plot_tree( decisionTreeModel, feature_names=train_inputs.columns, max_depth=2, filled=True ) def plotting_importance(model): importance_df = pd.DataFrame( {"feature": train_inputs.columns, "importance": model.feature_importances_} ).sort_values("importance", ascending=False) plt.title("Feature Importance") sns.barplot(data=importance_df.head(10), x="importance", y="feature") plotting_importance(decisionTreeModel) # It seems that it is important to have the knowledge about the wheel type in order to make a decision. Next we can observe that the vehicle odometer ("VehOdo") and the the price we paid ("VehBCost") are important features to bear in mind. # #### Accuracy train_preds = decisionTreeModel.predict(train_inputs) val_preds = decisionTreeModel.predict(val_inputs) accuracy_score(train_preds, train_targets) accuracy_score(val_preds, val_targets) # It seems that our model is overfitting, as the train set has 100% of accuracy and the validation set has 82% of accuracy (Our model is adjusted to the train set, but it is not capable of predict properly through external data) # #### Testing def test_params(**params): model = DecisionTreeClassifier(random_state=42, **params).fit( train_inputs, train_targets ) return model.score(train_inputs, train_targets), model.score( val_inputs, val_targets ) test_params(max_depth=6) # #### 2º Decision Tree Model decisionTreeModel2 = DecisionTreeClassifier(random_state=20, max_depth=6) decisionTreeModel2.fit(train_inputs, train_targets) plt.figure(figsize=(80, 20)) plot_tree( decisionTreeModel2, feature_names=train_inputs.columns, max_depth=2, filled=True ) plotting_importance(decisionTreeModel2) # Besides the knowledge about wheel type (with significant importance in this case -> 0.7), odometer vehicle and price paid for the car, it appears that are also important the vehicle age and the fact of being the company "Manheim Auction" who makes the auction. # #### Accuracy train_preds = decisionTreeModel2.predict(train_inputs) val_preds = decisionTreeModel2.predict(val_inputs) accuracy_score(train_preds, train_targets) accuracy_score(val_preds, val_targets) # The second decision tree model is better than the first one, as, apparently, it is not overfitting. We have reached 90% of accuracy. 
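# My addition (a hedged sketch): the test_params helper defined above can be used to scan a few
# depths and see where validation accuracy peaks; the list of depths is an assumption.
for depth in [3, 4, 5, 6, 8, 10, 12]:
    train_acc, val_acc = test_params(max_depth=depth)
    print(f"max_depth={depth}: train={train_acc:.3f}, val={val_acc:.3f}")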
# #### Saving the model decisionTree_Model = { "model": decisionTreeModel2, "imputerNum": imputerNum, "imputerCat": imputerCat, "scaler": scaler, "encoder": encoder, "input_cols": train_inputs, "target_col": train_targets, "numeric_cols": numeric_cols, "categorical_cols": categorical_cols, "encoded_cols": encoded_cols, } joblib.dump(decisionTree_Model, "decisionTreeModel_carAuctions.joblib") # ## Model 3: Random Forest from sklearn.ensemble import RandomForestClassifier randomForestModel = RandomForestClassifier(n_jobs=-1, random_state=20) randomForestModel.fit(train_inputs, train_targets) plotting_importance(randomForestModel) # Just like the decision tree models, it seems that the most important features are the wheel type, the vehicle odometer and the price we paid. However, none of them carries much importance on its own (no more than 0.08). # #### Accuracy randomForestModel.score(train_inputs, train_targets) randomForestModel.score(val_inputs, val_targets) # It seems that our model is overfitting a bit (as before), because the train set reaches 99% accuracy while the validation set reaches 89% (the model is adjusted to the training data but does not predict as well on unseen data). # #### Testing from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error # used by test_paramsRF below def test_paramsRF(**params): model = RandomForestRegressor(random_state=20, n_jobs=-1, **params).fit( train_inputs, train_targets ) train_rmse = mean_squared_error( model.predict(train_inputs), train_targets, squared=False ) val_rmse = mean_squared_error(model.predict(val_inputs), val_targets, squared=False) return train_rmse, val_rmse def test_param_and_plotRF(param_name, param_values): train_errors, val_errors = [], [] for value in param_values: params = {param_name: value} train_rmse, val_rmse = test_paramsRF(**params) train_errors.append(train_rmse) val_errors.append(val_rmse) plt.figure(figsize=(10, 6)) plt.title("Overfitting curve: " + param_name) plt.plot(param_values, train_errors, "b-o") plt.plot(param_values, val_errors, "r-o") plt.xlabel(param_name) plt.ylabel("RMSE") plt.legend(["Training", "Validation"]) test_param_and_plotRF("max_depth", [5, 10, 15, 20, 25, 30, 35]) test_param_and_plotRF("min_samples_leaf", [1, 10, 15, 20, 25, 30, 35, 40]) # #### 2º Random Forest Model randomForestModel2 = RandomForestClassifier( n_jobs=-1, random_state=20, max_depth=6, min_samples_leaf=20 ) randomForestModel2.fit(train_inputs, train_targets) plotting_importance(randomForestModel2) # IMPORTANT FEATURES (VERY LITTLE IMPORTANCE INDIVIDUALLY) # #### Accuracy randomForestModel2.score(train_inputs, train_targets) randomForestModel2.score(val_inputs, val_targets) # THERE IS NO OVERFITTING, WE ACHIEVED 87.9% (VALIDATION ACCURACY) # #### Saving the model randomForest_Model = { "model": randomForestModel2, "imputerNum": imputerNum, "imputerCat": imputerCat, "scaler": scaler, "encoder": encoder, "input_cols": train_inputs, "target_col": train_targets, "numeric_cols": numeric_cols, "categorical_cols": categorical_cols, "encoded_cols": encoded_cols, } joblib.dump(randomForest_Model, "randomForestModel_carAuctions.joblib") # ## Model 4: Gradient Boosting (see the sketch added after the final cell below) # ## Model Comparison and Prediction Examples # ## Conclusions import jovian # Execute this to save new versions of the notebook jovian.commit(project="car-auctions", filename="car-auctions.ipynb")
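# My addition (a hedged sketch, not part of the original notebook): a minimal candidate for the
# empty "Model 4: Gradient Boosting" section above, reusing the same train/validation split.
# The hyperparameters are assumptions, not tuned values.
from sklearn.ensemble import GradientBoostingClassifier

gbModel = GradientBoostingClassifier(n_estimators=200, max_depth=3, random_state=20)
gbModel.fit(train_inputs, train_targets)
print("train accuracy:", gbModel.score(train_inputs, train_targets))
print("validation accuracy:", gbModel.score(val_inputs, val_targets))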
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/696/69696809.ipynb
null
null
[{"Id": 69696809, "ScriptId": 18951232, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5768985, "CreationDate": "08/02/2021 20:25:39", "VersionNumber": 8.0, "Title": "car_auctions", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 381.0, "LinesInsertedFromPrevious": 65.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 316.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# # Car Auctions # ## Introduction # In this notebook I will build some machine learning models with the purpose of predicting if a specific car bought at auction is a bad/good purchase. # In this case, supervised machine learning techniques will be used, as we will build models through labeled data. # ## About Dataset # SOME INFORMATION ABOUT DATASET + LINK # Link Dataset: https://www.kaggle.com/c/DontGetKicked # ## Imports and Dataset Download import opendatasets as od import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np import matplotlib import os pd.set_option("display.max_columns", None) pd.set_option("display.max_rows", 150) sns.set_style("darkgrid") matplotlib.rcParams["font.size"] = 14 matplotlib.rcParams["figure.figsize"] = (10, 6) matplotlib.rcParams["figure.facecolor"] = "#00000000" od.download("https://www.kaggle.com/c/DontGetKicked") os.listdir("DontGetKicked") # ## Training Set and Test Set train_df = pd.read_csv("DontGetKicked/training.csv") train_df.head() test_df = pd.read_csv("DontGetKicked/test.csv") test_df.head() # ## Inputs and Output train_df.columns inputs = [ "Auction", "VehYear", "VehicleAge", "Make", "Model", "Trim", "SubModel", "Color", "Transmission", "WheelType", "VehOdo", "Nationality", "Size", "TopThreeAmericanName", "MMRAcquisitionAuctionAveragePrice", "MMRAcquisitionAuctionCleanPrice", "MMRAcquisitionRetailAveragePrice", "MMRAcquisitonRetailCleanPrice", "MMRCurrentAuctionAveragePrice", "MMRCurrentAuctionCleanPrice", "MMRCurrentRetailAveragePrice", "MMRCurrentRetailCleanPrice", "PRIMEUNIT", "AUCGUART", "VehBCost", "WarrantyCost", ] output = "IsBadBuy" inputs_df = train_df[inputs].copy() output_df = train_df[output].copy() # ## Data Preprocessing # ### Null Data inputs_df.info() # As there is a little proportion of non-null data in relation to null data in columns "PRIMEUNIT" and "AUCGUART", we will not use them in our models. 
inputs_df = inputs_df.drop(columns=["PRIMEUNIT", "AUCGUART"], axis=1)
inputs_df.info()

# ### Imputing missing numeric values

numeric_cols = inputs_df.select_dtypes(include=["int64", "float64"]).columns.tolist()
inputs_df[numeric_cols].isna().sum().sort_values(ascending=False)

#
from sklearn.impute import SimpleImputer

imputerNum = SimpleImputer(strategy="mean").fit(inputs_df[numeric_cols])
inputs_df[numeric_cols] = imputerNum.transform(inputs_df[numeric_cols])
inputs_df[numeric_cols].isna().sum()
inputs_df[numeric_cols].info()

# ### Scaling Numeric Features

inputs_df[numeric_cols].describe().loc[["min", "max"]]

from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler().fit(inputs_df[numeric_cols])
inputs_df[numeric_cols] = scaler.transform(inputs_df[numeric_cols])
inputs_df[numeric_cols].describe().loc[["min", "max"]]

# ### Imputing missing categorical data

categorical_cols = inputs_df.select_dtypes(include=["object"]).columns.tolist()
imputerCat = SimpleImputer(strategy="constant", fill_value="missing")
imputerCat.fit(inputs_df[categorical_cols])
inputs_df[categorical_cols] = imputerCat.transform(inputs_df[categorical_cols])

# ### Encoding Categorical Data

inputs_df[categorical_cols].nunique().sort_values(ascending=False)

from sklearn.preprocessing import OneHotEncoder

encoder = OneHotEncoder(sparse=False, handle_unknown="ignore").fit(
    inputs_df[categorical_cols]
)
encoded_cols = list(encoder.get_feature_names(categorical_cols))
len(encoded_cols)
encoded_df = pd.DataFrame(
    encoder.transform(inputs_df[categorical_cols]), columns=encoded_cols
)
inputs_df = pd.concat((inputs_df, encoded_df), axis=1)
inputs_df.head()

# ## Validation Set

from sklearn.model_selection import train_test_split

train_inputs, val_inputs, train_targets, val_targets = train_test_split(
    inputs_df[numeric_cols + encoded_cols], output_df, test_size=0.25, random_state=20
)

# ## Model 1: Logistic Regression

from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

logisticModel = LogisticRegression(solver="liblinear")
logisticModel.fit(train_inputs, train_targets)

# #### Accuracy

train_preds = logisticModel.predict(train_inputs)
val_preds = logisticModel.predict(val_inputs)
accuracy_score(train_targets, train_preds)
accuracy_score(val_targets, val_preds)

# With the logistic regression model we reach about 89.6% accuracy on the validation set, so at first sight it looks like a reliable model.
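# One caveat worth checking: accuracy alone can be flattering when the target is imbalanced. The sketch below is a minimal extra check, assuming `output_df`, `val_targets` and `val_preds` are defined as above; it compares the model against a majority-class baseline and looks at per-class precision and recall.

# Share of each class in the target; always predicting the majority class
# already yields an accuracy equal to the largest share shown here.
output_df.value_counts(normalize=True)

from sklearn.metrics import classification_report

# Per-class precision/recall gives a fuller picture than accuracy alone.
print(classification_report(val_targets, val_preds))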
# #### Saving the model

import joblib

logistic_model = {
    "model": logisticModel,
    "imputerNum": imputerNum,
    "imputerCat": imputerCat,
    "scaler": scaler,
    "encoder": encoder,
    "input_cols": train_inputs,
    "target_col": train_targets,
    "numeric_cols": numeric_cols,
    "categorical_cols": categorical_cols,
    "encoded_cols": encoded_cols,
}

joblib.dump(logistic_model, "logModel_carAuctions.joblib")

# ## Model 2: Decision Tree

from sklearn.tree import DecisionTreeClassifier, plot_tree

decisionTreeModel = DecisionTreeClassifier(random_state=20)
decisionTreeModel.fit(train_inputs, train_targets)

plt.figure(figsize=(80, 20))
plot_tree(
    decisionTreeModel, feature_names=train_inputs.columns, max_depth=2, filled=True
)


def plotting_importance(model):
    importance_df = pd.DataFrame(
        {"feature": train_inputs.columns, "importance": model.feature_importances_}
    ).sort_values("importance", ascending=False)
    plt.title("Feature Importance")
    sns.barplot(data=importance_df.head(10), x="importance", y="feature")


plotting_importance(decisionTreeModel)

# It seems that knowing the wheel type is important in order to make a decision. Next we can observe that the vehicle odometer ("VehOdo") and the price we paid ("VehBCost") are important features to bear in mind.
# #### Accuracy

train_preds = decisionTreeModel.predict(train_inputs)
val_preds = decisionTreeModel.predict(val_inputs)
accuracy_score(train_targets, train_preds)
accuracy_score(val_targets, val_preds)

# It seems that our model is overfitting: the train set reaches 100% accuracy while the validation set only reaches about 82% (the model is fitted too closely to the training set and does not generalize properly to unseen data).
# #### Testing


def test_params(**params):
    model = DecisionTreeClassifier(random_state=42, **params).fit(
        train_inputs, train_targets
    )
    return model.score(train_inputs, train_targets), model.score(
        val_inputs, val_targets
    )


test_params(max_depth=6)

# #### 2nd Decision Tree Model

decisionTreeModel2 = DecisionTreeClassifier(random_state=20, max_depth=6)
decisionTreeModel2.fit(train_inputs, train_targets)

plt.figure(figsize=(80, 20))
plot_tree(
    decisionTreeModel2, feature_names=train_inputs.columns, max_depth=2, filled=True
)

plotting_importance(decisionTreeModel2)

# Besides the wheel type (with significant importance in this case, around 0.7), the vehicle odometer and the price paid for the car, the vehicle age and the fact that the auction is run by "Manheim Auction" also appear to be important.
# #### Accuracy

train_preds = decisionTreeModel2.predict(train_inputs)
val_preds = decisionTreeModel2.predict(val_inputs)
accuracy_score(train_targets, train_preds)
accuracy_score(val_targets, val_preds)

# The second decision tree model is better than the first one, as it apparently no longer overfits. We have reached about 90% accuracy on the validation set.
# #### Saving the model

decisionTree_Model = {
    "model": decisionTreeModel2,
    "imputerNum": imputerNum,
    "imputerCat": imputerCat,
    "scaler": scaler,
    "encoder": encoder,
    "input_cols": train_inputs,
    "target_col": train_targets,
    "numeric_cols": numeric_cols,
    "categorical_cols": categorical_cols,
    "encoded_cols": encoded_cols,
}

joblib.dump(decisionTree_Model, "decisionTreeModel_carAuctions.joblib")

# ## Model 3: Random Forest

from sklearn.ensemble import RandomForestClassifier

randomForestModel = RandomForestClassifier(n_jobs=-1, random_state=20)
randomForestModel.fit(train_inputs, train_targets)

plotting_importance(randomForestModel)

# Just like the decision tree models, it seems that the most important features are the wheel type, the vehicle odometer and the price we paid. However, none of them carries much importance on its own (no more than about 0.08).
# #### Accuracy

randomForestModel.score(train_inputs, train_targets)
randomForestModel.score(val_inputs, val_targets)

# It seems that our model is overfitting a bit (as before), because the train set reaches 99% accuracy while the validation set reaches about 89% (the model is fitted too closely to the training set and does not generalize properly to unseen data).
# #### Testing

from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error


def test_paramsRF(**params):
    model = RandomForestRegressor(random_state=20, n_jobs=-1, **params).fit(
        train_inputs, train_targets
    )
    train_rmse = mean_squared_error(
        model.predict(train_inputs), train_targets, squared=False
    )
    val_rmse = mean_squared_error(model.predict(val_inputs), val_targets, squared=False)
    return train_rmse, val_rmse


def test_param_and_plotRF(param_name, param_values):
    train_errors, val_errors = [], []
    for value in param_values:
        params = {param_name: value}
        train_rmse, val_rmse = test_paramsRF(**params)
        train_errors.append(train_rmse)
        val_errors.append(val_rmse)
    plt.figure(figsize=(10, 6))
    plt.title("Overfitting curve: " + param_name)
    plt.plot(param_values, train_errors, "b-o")
    plt.plot(param_values, val_errors, "r-o")
    plt.xlabel(param_name)
    plt.ylabel("RMSE")
    plt.legend(["Training", "Validation"])


test_param_and_plotRF("max_depth", [5, 10, 15, 20, 25, 30, 35])
test_param_and_plotRF("min_samples_leaf", [1, 10, 15, 20, 25, 30, 35, 40])

# #### 2nd Random Forest Model

randomForestModel2 = RandomForestClassifier(
    n_jobs=-1, random_state=20, max_depth=6, min_samples_leaf=20
)
randomForestModel2.fit(train_inputs, train_targets)

plotting_importance(randomForestModel2)

# Important features (each with very little importance on its own)
# #### Accuracy

randomForestModel2.score(train_inputs, train_targets)
randomForestModel2.score(val_inputs, val_targets)

# No overfitting; we reach 87.9% accuracy on the validation set.
# #### Saving the model

randomForest_Model = {
    "model": randomForestModel2,
    "imputerNum": imputerNum,
    "imputerCat": imputerCat,
    "scaler": scaler,
    "encoder": encoder,
    "input_cols": train_inputs,
    "target_col": train_targets,
    "numeric_cols": numeric_cols,
    "categorical_cols": categorical_cols,
    "encoded_cols": encoded_cols,
}

joblib.dump(randomForest_Model, "randomForestModel_carAuctions.joblib")

# ## Model 4: Gradient Boosting
# A hedged baseline sketch for this section is added after the final cell of this notebook.
# ## Model Comparison and Prediction Examples
# ## Conclusions

import jovian

# Execute this to save new versions of the notebook
jovian.commit(project="car-auctions", filename="car-auctions.ipynb")
false
0
3,305
0
3,305
3,305
69702844
<jupyter_start><jupyter_text>Social Network Ads ### Context This is a data for learning and implementing your first ML models . ### Content It includes age and estimated salary of the user. The purchased column indicates weather the particular user with age and estimated salary have bought the product or not by viewing the social ads of the product . - 0 : No - 1 : Yes Kaggle dataset identifier: social-network-ads <jupyter_code>import pandas as pd df = pd.read_csv('social-network-ads/Social_Network_Ads.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 400 entries, 0 to 399 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Age 400 non-null int64 1 EstimatedSalary 400 non-null int64 2 Purchased 400 non-null int64 dtypes: int64(3) memory usage: 9.5 KB <jupyter_text>Examples: { "Age": 19, "EstimatedSalary": 19000, "Purchased": 0 } { "Age": 35, "EstimatedSalary": 20000, "Purchased": 0 } { "Age": 26, "EstimatedSalary": 43000, "Purchased": 0 } { "Age": 27, "EstimatedSalary": 57000, "Purchased": 0 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import matplotlib.pyplot as plt df = pd.read_csv("../input/social-network-ads/Social_Network_Ads.csv") df.head() df.Purchased.value_counts() # ### NOW MAKE THE DEPENDENT MATRIX AND INDEPENDENT MATRIX X = df[["Age", "EstimatedSalary"]] y = df["Purchased"] print(X.head()) y.head() # ### NOW SPLIT THE DATASET INTO TRAINING SET AND TEST SET from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=23 ) # ### TRAIN THE MODEL from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier() model.fit(X_train, y_train) result = model.predict(X_test) print(result) score = 100 * model.score(X_test, y_test) print(score) # ### REPORT from sklearn.metrics import classification_report print(classification_report(y_test, result))
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/702/69702844.ipynb
social-network-ads
shub99
[{"Id": 69702844, "ScriptId": 19046122, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4948300, "CreationDate": "08/02/2021 21:03:10", "VersionNumber": 1.0, "Title": "Sample To Begin", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 57.0, "LinesInsertedFromPrevious": 57.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
[{"Id": 93175596, "KernelVersionId": 69702844, "SourceDatasetVersionId": 2492955}]
[{"Id": 2492955, "DatasetId": 1509153, "DatasourceVersionId": 2535537, "CreatorUserId": 4948300, "LicenseName": "Community Data License Agreement - Sharing - Version 1.0", "CreationDate": "08/02/2021 20:15:08", "VersionNumber": 1.0, "Title": "Social Network Ads", "Slug": "social-network-ads", "Subtitle": "Social Network Ads - Random Forest", "Description": "### Context\n\nThis is a data for learning and implementing your first ML models .\n\n\n### Content\n\nIt includes age and estimated salary of the user. The purchased column indicates weather the particular user with age and estimated salary have bought the product or not by viewing the social ads of the product .\n- 0 : No\n- 1 : Yes", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1509153, "CreatorUserId": 4948300, "OwnerUserId": 4948300.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2492955.0, "CurrentDatasourceVersionId": 2535537.0, "ForumId": 1528903, "Type": 2, "CreationDate": "08/02/2021 20:15:08", "LastActivityDate": "08/02/2021", "TotalViews": 12973, "TotalDownloads": 1340, "TotalVotes": 31, "TotalKernels": 10}]
[{"Id": 4948300, "UserName": "shub99", "DisplayName": "Shubham Singh", "RegisterDate": "04/24/2020", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import matplotlib.pyplot as plt df = pd.read_csv("../input/social-network-ads/Social_Network_Ads.csv") df.head() df.Purchased.value_counts() # ### NOW MAKE THE DEPENDENT MATRIX AND INDEPENDENT MATRIX X = df[["Age", "EstimatedSalary"]] y = df["Purchased"] print(X.head()) y.head() # ### NOW SPLIT THE DATASET INTO TRAINING SET AND TEST SET from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=23 ) # ### TRAIN THE MODEL from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier() model.fit(X_train, y_train) result = model.predict(X_test) print(result) score = 100 * model.score(X_test, y_test) print(score) # ### REPORT from sklearn.metrics import classification_report print(classification_report(y_test, result))
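# To round off the walkthrough, the snippet below shows how the trained model could be used for a single new user. It is an illustrative sketch: the age and salary values are made up, and `model` is assumed to be the fitted RandomForestClassifier from above.

# Predict whether a hypothetical 40-year-old earning 90,000 would purchase (1) or not (0).
new_user = pd.DataFrame({"Age": [40], "EstimatedSalary": [90000]})
print(model.predict(new_user))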
[{"social-network-ads/Social_Network_Ads.csv": {"column_names": "[\"Age\", \"EstimatedSalary\", \"Purchased\"]", "column_data_types": "{\"Age\": \"int64\", \"EstimatedSalary\": \"int64\", \"Purchased\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 400 entries, 0 to 399\nData columns (total 3 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 Age 400 non-null int64\n 1 EstimatedSalary 400 non-null int64\n 2 Purchased 400 non-null int64\ndtypes: int64(3)\nmemory usage: 9.5 KB\n", "summary": "{\"Age\": {\"count\": 400.0, \"mean\": 37.655, \"std\": 10.482876597307914, \"min\": 18.0, \"25%\": 29.75, \"50%\": 37.0, \"75%\": 46.0, \"max\": 60.0}, \"EstimatedSalary\": {\"count\": 400.0, \"mean\": 69742.5, \"std\": 34096.960282424785, \"min\": 15000.0, \"25%\": 43000.0, \"50%\": 70000.0, \"75%\": 88000.0, \"max\": 150000.0}, \"Purchased\": {\"count\": 400.0, \"mean\": 0.3575, \"std\": 0.479863963596869, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": "{\"Age\":{\"0\":19,\"1\":35,\"2\":26,\"3\":27},\"EstimatedSalary\":{\"0\":19000,\"1\":20000,\"2\":43000,\"3\":57000},\"Purchased\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0}}"}}]
true
1
<start_data_description><data_path>social-network-ads/Social_Network_Ads.csv: <column_names> ['Age', 'EstimatedSalary', 'Purchased'] <column_types> {'Age': 'int64', 'EstimatedSalary': 'int64', 'Purchased': 'int64'} <dataframe_Summary> {'Age': {'count': 400.0, 'mean': 37.655, 'std': 10.482876597307914, 'min': 18.0, '25%': 29.75, '50%': 37.0, '75%': 46.0, 'max': 60.0}, 'EstimatedSalary': {'count': 400.0, 'mean': 69742.5, 'std': 34096.960282424785, 'min': 15000.0, '25%': 43000.0, '50%': 70000.0, '75%': 88000.0, 'max': 150000.0}, 'Purchased': {'count': 400.0, 'mean': 0.3575, 'std': 0.479863963596869, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}} <dataframe_info> RangeIndex: 400 entries, 0 to 399 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Age 400 non-null int64 1 EstimatedSalary 400 non-null int64 2 Purchased 400 non-null int64 dtypes: int64(3) memory usage: 9.5 KB <some_examples> {'Age': {'0': 19, '1': 35, '2': 26, '3': 27}, 'EstimatedSalary': {'0': 19000, '1': 20000, '2': 43000, '3': 57000}, 'Purchased': {'0': 0, '1': 0, '2': 0, '3': 0}} <end_description>
472
5
863
472
69702464
<jupyter_start><jupyter_text>Crime Data in Brazil ### Context Brazil has a very powerful Freedom of Information law which allows any citizen to request any data from the government which is not restricted, and where these restrictions are well defined exceptions. But still, having the right to request the information does not mean it is easy to get it. Bureaucracy and ignorance of the law gets in the way many times. In order to encourage the government to put their databases in order and to inspire people to have the courage to ask the government for information, we made a massive request of information, for the complete dataset of crime data available for the last 10 years, in the biggest city of South America. ### Content This dataset contains structured data about all crime occurrences that have been acted upon by the PM, the main police force in Sao Paulo. The dataset is not consistent in its completeness, as some of the towns comprising the Greater Sao Paulo were slow in collecting full data. It also does not contain the actual historic of each crime report, as that would violate privacy. Kaggle dataset identifier: crime-data-in-brazil <jupyter_code>import pandas as pd df = pd.read_csv('crime-data-in-brazil/BO_2016.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 774662 entries, 0 to 774661 Data columns (total 22 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 NUM_BO 774662 non-null int64 1 ANO_BO 774662 non-null int64 2 ID_DELEGACIA 774662 non-null int64 3 NOME_DEPARTAMENTO 774662 non-null object 4 NOME_SECCIONAL 774662 non-null object 5 DELEGACIA 774662 non-null object 6 NOME_DEPARTAMENTO_CIRC 774662 non-null object 7 NOME_SECCIONAL_CIRC 774662 non-null object 8 NOME_DELEGACIA_CIRC 774662 non-null object 9 ANO 774662 non-null int64 10 MES 774662 non-null int64 11 FLAG_STATUS 774662 non-null object 12 RUBRICA 774662 non-null object 13 DESDOBRAMENTO 30092 non-null object 14 CONDUTA 658523 non-null object 15 LATITUDE 731960 non-null float64 16 LONGITUDE 731959 non-null float64 17 CIDADE 774662 non-null object 18 LOGRADOURO 774660 non-null object 19 NUMERO_LOGRADOURO 774337 non-null object 20 FLAG_STATUS.1 774662 non-null object 21 Unnamed: 21 24 non-null object dtypes: float64(2), int64(5), object(15) memory usage: 130.0+ MB <jupyter_text>Examples: { "NUM_BO": 3784, "ANO_BO": 2016, "ID_DELEGACIA": 10101, "NOME_DEPARTAMENTO": "DECAP ", "NOME_SECCIONAL": "DEL.SEC.1\u00ba CENTRO ", "DELEGACIA": "01\u00ba D.P. SE ", "NOME_DEPARTAMENTO_CIRC": "DECAP ", "NOME_SECCIONAL_CIRC": "DEL.SEC.1\u00ba CENTRO ", "NOME_DELEGACIA_CIRC": "78\u00ba D.P. JARDINS ", "ANO": 2016, "MES": 7, "FLAG_STATUS": "C", "RUBRICA": "Furto (art. 155)", "DESDOBRAMENTO": NaN, "CONDUTA": "TRANSEUNTE", "LATITUDE": -23.56498421, "LONGITUDE": -46.65203458, "CIDADE": "S.PAULO ", "LOGRADOURO": "AVENIDA PAULISTA ", "NUMERO_LOGRADOURO": 1000, "...": "and 2 more columns" } { "NUM_BO": 3426, "ANO_BO": 2016, "ID_DELEGACIA": 10102, "NOME_DEPARTAMENTO": "DECAP ", "NOME_SECCIONAL": "DEL.SEC.1\u00ba CENTRO ", "DELEGACIA": "02\u00ba D.P. BOM RETIRO ", "NOME_DEPARTAMENTO_CIRC": "DECAP ", "NOME_SECCIONAL_CIRC": "DEL.SEC.1\u00ba CENTRO ", "NOME_DELEGACIA_CIRC": "03\u00ba D.P. CAMPOS ELISEOS ", "ANO": 2016, "MES": 5, "FLAG_STATUS": "C", "RUBRICA": "Roubo (art. 
157)", "DESDOBRAMENTO": NaN, "CONDUTA": "TRANSEUNTE", "LATITUDE": -23.542476399999998, "LONGITUDE": -46.64192812, "CIDADE": "S.PAULO ", "LOGRADOURO": "PRA\u00c7A DA REPUBLICA ", "NUMERO_LOGRADOURO": 0, "...": "and 2 more columns" } { "NUM_BO": 6359, "ANO_BO": 2016, "ID_DELEGACIA": 10102, "NOME_DEPARTAMENTO": "DECAP ", "NOME_SECCIONAL": "DEL.SEC.1\u00ba CENTRO ", "DELEGACIA": "02\u00ba D.P. BOM RETIRO ", "NOME_DEPARTAMENTO_CIRC": "DECAP ", "NOME_SECCIONAL_CIRC": "DEL.SEC.1\u00ba CENTRO ", "NOME_DELEGACIA_CIRC": "03\u00ba D.P. CAMPOS ELISEOS ", "ANO": 2016, "MES": 10, "FLAG_STATUS": "C", "RUBRICA": "Drogas sem autoriza\u00e7\u00e3o ou em desacordo (Art.33, caput)", "DESDOBRAMENTO": NaN, "CONDUTA": null, "LATITUDE": -23.5421834, "LONGITUDE": -46.64059853, "CIDADE": "S.PAULO ", "LOGRADOURO": "RUA CONSELHEIRO NEBIAS ", "NUMERO_LOGRADOURO": 0, "...": "and 2 more columns" } { "NUM_BO": 1267, "ANO_BO": 2016, "ID_DELEGACIA": 10103, "NOME_DEPARTAMENTO": "DECAP ", "NOME_SECCIONAL": "DEL.SEC.1\u00ba CENTRO ", "DELEGACIA": "03\u00ba D.P. CAMPOS ELISEOS ", "NOME_DEPARTAMENTO_CIRC": "DECAP ", "NOME_SECCIONAL_CIRC": "DEL.SEC.8\u00ba SAO MATEUS ", "NOME_DELEGACIA_CIRC": "49\u00ba D.P. SAO MATEUS ", "ANO": 2016, "MES": 3, "FLAG_STATUS": "C", "RUBRICA": "Roubo (art. 157)", "DESDOBRAMENTO": NaN, "CONDUTA": "CARGA", "LATITUDE": -23.60927461, "LONGITUDE": -46.4550868, "CIDADE": "S.PAULO ", "LOGRADOURO": "RUA MADUREIRA CALHEIROS ", "NUMERO_LOGRADOURO": 15, "...": "and 2 more columns" } <jupyter_script>import pandas as pd pd.set_option("display.max_columns", 999) import numpy as np import geopandas as gpd import urbanpy as up import warnings warnings.filterwarnings("ignore") from matplotlib import pyplot as plt import contextily as ctx data = pd.read_csv("../input/crime-data-in-brazil/BO_2016.csv") data = gpd.GeoDataFrame( data, geometry=gpd.points_from_xy(data.LONGITUDE, data.LATITUDE) ) data.head() def clean_string1(x): string = x.split(".-")[-1] string = string.split("(")[0] return string.strip() data["Motivo"] = data["RUBRICA"].apply(clean_string1) amsp = up.download.nominatim_osm("Região Imediata de São Paulo") saop = up.download.nominatim_osm("Sao Paulo") brazil_hdx_datasets = [ "c17003d1-47f4-4ec5-8229-2f77aeb114be/resource/957218ee-c740-44c0-88e5-7faeef813a0c/download/population_bra_northeast_2018-10-01.csv.zip", "c17003d1-47f4-4ec5-8229-2f77aeb114be/resource/1e1f271b-1055-4365-b391-f6fdf3093fe2/download/population_bra_northwest_2018-10-01.csv.zip", "c17003d1-47f4-4ec5-8229-2f77aeb114be/resource/eb17516f-3c84-4626-95e4-df1f342f3d82/download/population_bra_southeast_2018-10-01.csv.zip", "c17003d1-47f4-4ec5-8229-2f77aeb114be/resource/5cb55d1a-9f11-4004-82f3-0c27e878495a/download/population_bra_southwest_2018-10-01.csv.zip", ] amsp.crs = {"init": "EPSG:4326"} saop.crs = {"init": "EPSG:4326"} data.crs = {"init": "EPSG:4326"} data.groupby("Motivo").size().sort_values() dict_crimes = {} for m in data["Motivo"].unique(): if ("furto" in m.lower()) or ("roubo" in m.lower()): dict_crimes[m] = "Robo" elif "lesão" in m.lower(): dict_crimes[m] = "Lesión" elif "homicídio" in m.lower(): dict_crimes[m] = "Homicidio" elif "estupro" in m.lower(): dict_crimes[m] = "Violación" elif ("entorpecente" in m.lower()) or ("droga" in m.lower()): dict_crimes[m] = "Tráfico de drogas" else: print(m, "-> No encuentra") data["Categoria"] = data["Motivo"].apply(lambda x: dict_crimes[x]) data.groupby("Categoria").size() fig, ax = plt.subplots(1, figsize=(20, 15), dpi=500) axins = ax.inset_axes([0.5, -0.05, 0.6, 0.6]) 
axins.set(xlim=(-5.225e6, -5.15e6), ylim=(-2.73e6, -2.68e6)) amsp.boundary.to_crs(epsg=3857).plot(ax=ax, color="black", lw=1) data.sample(frac=0.3).to_crs(epsg=3857).plot( ax=ax, column="Crimenes", color="red", alpha=0.5, legend=True, markersize=0.01 ) data.sample(frac=0.3).to_crs(epsg=3857).plot( ax=axins, column="Crimenes", color="red", alpha=0.25, legend=True, markersize=0.1 ) ctx.add_basemap(ax, source=ctx.providers.Stamen.TonerLite) ctx.add_basemap(axins, source=ctx.providers.Stamen.TonerLite) ax.indicate_inset_zoom(axins) ax.set_xticklabels("") ax.set_yticklabels("") axins.set_xticklabels("") axins.set_yticklabels("") plt.title( "Crimenes reportados en el Área Metropolitana de Sao Paulo en 2016", fontsize=20 ) fig.show() pop_bra = [] for link in brazil_hdx_datasets: pops = up.download.hdx_dataset(link) pop_bra.append(pops) pop_bra = pd.concat(pop_bra) pop_df = up.geom.filter_population(pop_bra, amsp) amsp_hex = up.geom.gen_hexagons(6, amsp) saop_hex = up.geom.gen_hexagons(8, amsp) amsp_hex = up.geom.merge_shape_hex( amsp_hex, pop_df, {"population_2020": "sum"}, how="left", op="within" ) saop_hex = up.geom.merge_shape_hex( saop_hex, pop_df, {"population_2020": "sum"}, how="left", op="within" ) datavars = ["ANO", "MES", "Categoria", "geometry"] data1 = data[datavars] gdf_am = gpd.sjoin(amsp_hex, data1, how="left") gdf_sp = gpd.sjoin(saop_hex, data1, how="left") gdf_am.head() gdf_sp.head() print(gdf_am.shape, gdf_sp.shape) processed_am = ( gdf_am.groupby(["hex", "Categoria"]) .size() .unstack() .join( gdf_am[["hex", "geometry", "population_2020"]] .drop_duplicates() .set_index("hex"), how="right", ) .fillna(0) .reset_index() ) processed_sp = ( gdf_sp.groupby(["hex", "Categoria"]) .size() .unstack() .join( gdf_sp[["hex", "geometry", "population_2020"]] .drop_duplicates() .set_index("hex"), how="right", ) .fillna(0) .reset_index() ) processed_am["Crimenes"] = processed_am[ ["Homicidio", "Lesión", "Robo", "Tráfico de drogas", "Violación"] ].sum(1) processed_sp["Crimenes"] = processed_sp[ ["Homicidio", "Lesión", "Robo", "Tráfico de drogas", "Violación"] ].sum(1) processed_am = gpd.GeoDataFrame(processed_am) processed_sp = gpd.GeoDataFrame(processed_sp) processed_am.head() fig, ax = plt.subplots(1, figsize=(20, 15), dpi=500) amsp.boundary.to_crs(epsg=3857).plot(ax=ax, color="black", lw=1) saop.boundary.to_crs(epsg=3857).plot(ax=ax, color="black", lw=1) processed_am.to_crs(epsg=3857).plot( ax=ax, column="Crimenes", cmap="Spectral_r", alpha=0.5, legend=True, legend_kwds={ "location": "left", "label": "Cantidad de robos por hexágono (6)", "shrink": 0.6, }, ) axins = ax.inset_axes([0.55, -0.06, 0.6, 0.6]) axins.set(xlim=(-5.21e6, -5.175e6), ylim=(-2.715e6, -2.685e6)) saop.boundary.to_crs(epsg=3857).plot(ax=axins, color="black", lw=1) processed_sp.to_crs(epsg=3857).plot( ax=axins, column="Crimenes", cmap="Spectral_r", alpha=0.5, legend=True, legend_kwds={ "label": "Cantidad de robos por hexágono (8)", "location": "right", "shrink": 0.6, }, ) leg = axins.get_legend() plt.title( "Mapa H3 con resolución 6 y 8 del Departamento de São Paulo: Cantidad de crimenes por hexágono en 2016", fontsize=20, ) ctx.add_basemap(ax, source=ctx.providers.Stamen.TonerLite) ctx.add_basemap(axins, source=ctx.providers.Stamen.TonerLite) ax.indicate_inset_zoom(axins) ax.set_xticklabels("") ax.set_yticklabels("") axins.set_xticklabels("") axins.set_yticklabels("") fig.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/702/69702464.ipynb
crime-data-in-brazil
inquisitivecrow
[{"Id": 69702464, "ScriptId": 18927378, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3114701, "CreationDate": "08/02/2021 21:00:22", "VersionNumber": 2.0, "Title": "Geospatial EDA on Sao Pablo 2016 Crime Reports", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 168.0, "LinesInsertedFromPrevious": 47.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 121.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 93175197, "KernelVersionId": 69702464, "SourceDatasetVersionId": 330988}]
[{"Id": 330988, "DatasetId": 5013, "DatasourceVersionId": 344561, "CreatorUserId": 494026, "LicenseName": "GPL 2", "CreationDate": "03/16/2019 08:25:25", "VersionNumber": 1.0, "Title": "Crime Data in Brazil", "Slug": "crime-data-in-brazil", "Subtitle": "All crime data for 10 years of police work in the biggest city of South America", "Description": "### Context\n\nBrazil has a very powerful Freedom of Information law which allows any citizen to request any data from the government which is not restricted, and where these restrictions are well defined exceptions. But still, having the right to request the information does not mean it is easy to get it. Bureaucracy and ignorance of the law gets in the way many times.\nIn order to encourage the government to put their databases in order and to inspire people to have the courage to ask the government for information, we made a massive request of information, for the complete dataset of crime data available for the last 10 years, in the biggest city of South America.\n\n### Content\n\nThis dataset contains structured data about all crime occurrences that have been acted upon by the PM, the main police force in Sao Paulo. The dataset is not consistent in its completeness, as some of the towns comprising the Greater Sao Paulo were slow in collecting full data. It also does not contain the actual historic of each crime report, as that would violate privacy.\n\n\n### Acknowledgements\n\nWe would like to acknowledge the prompt assistance from the SSP (Secretaria de Seguranca Publica), for providing the data with minimal resistance.\n\n\n### Inspiration\n\nPrimarily we would like to see a visualisation of this data, so that the people can have an idea of how crime has evolved in their city, which crimes are more prevalent in which areas, etc.\nIn addition, any model which can predict at what times and where the police is most needed would be helpful, as this can then be sent to the SSP to help them in planning.", "VersionNotes": "Properly separated and complete data", "TotalCompressedBytes": 9093140055.0, "TotalUncompressedBytes": 909215993.0}]
[{"Id": 5013, "CreatorUserId": 494026, "OwnerUserId": 494026.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 330988.0, "CurrentDatasourceVersionId": 344561.0, "ForumId": 11024, "Type": 2, "CreationDate": "11/21/2017 05:36:19", "LastActivityDate": "02/04/2018", "TotalViews": 39089, "TotalDownloads": 4695, "TotalVotes": 151, "TotalKernels": 15}]
[{"Id": 494026, "UserName": "inquisitivecrow", "DisplayName": "Marco Zanchi", "RegisterDate": "12/27/2015", "PerformanceTier": 1}]
import pandas as pd pd.set_option("display.max_columns", 999) import numpy as np import geopandas as gpd import urbanpy as up import warnings warnings.filterwarnings("ignore") from matplotlib import pyplot as plt import contextily as ctx data = pd.read_csv("../input/crime-data-in-brazil/BO_2016.csv") data = gpd.GeoDataFrame( data, geometry=gpd.points_from_xy(data.LONGITUDE, data.LATITUDE) ) data.head() def clean_string1(x): string = x.split(".-")[-1] string = string.split("(")[0] return string.strip() data["Motivo"] = data["RUBRICA"].apply(clean_string1) amsp = up.download.nominatim_osm("Região Imediata de São Paulo") saop = up.download.nominatim_osm("Sao Paulo") brazil_hdx_datasets = [ "c17003d1-47f4-4ec5-8229-2f77aeb114be/resource/957218ee-c740-44c0-88e5-7faeef813a0c/download/population_bra_northeast_2018-10-01.csv.zip", "c17003d1-47f4-4ec5-8229-2f77aeb114be/resource/1e1f271b-1055-4365-b391-f6fdf3093fe2/download/population_bra_northwest_2018-10-01.csv.zip", "c17003d1-47f4-4ec5-8229-2f77aeb114be/resource/eb17516f-3c84-4626-95e4-df1f342f3d82/download/population_bra_southeast_2018-10-01.csv.zip", "c17003d1-47f4-4ec5-8229-2f77aeb114be/resource/5cb55d1a-9f11-4004-82f3-0c27e878495a/download/population_bra_southwest_2018-10-01.csv.zip", ] amsp.crs = {"init": "EPSG:4326"} saop.crs = {"init": "EPSG:4326"} data.crs = {"init": "EPSG:4326"} data.groupby("Motivo").size().sort_values() dict_crimes = {} for m in data["Motivo"].unique(): if ("furto" in m.lower()) or ("roubo" in m.lower()): dict_crimes[m] = "Robo" elif "lesão" in m.lower(): dict_crimes[m] = "Lesión" elif "homicídio" in m.lower(): dict_crimes[m] = "Homicidio" elif "estupro" in m.lower(): dict_crimes[m] = "Violación" elif ("entorpecente" in m.lower()) or ("droga" in m.lower()): dict_crimes[m] = "Tráfico de drogas" else: print(m, "-> No encuentra") data["Categoria"] = data["Motivo"].apply(lambda x: dict_crimes[x]) data.groupby("Categoria").size() fig, ax = plt.subplots(1, figsize=(20, 15), dpi=500) axins = ax.inset_axes([0.5, -0.05, 0.6, 0.6]) axins.set(xlim=(-5.225e6, -5.15e6), ylim=(-2.73e6, -2.68e6)) amsp.boundary.to_crs(epsg=3857).plot(ax=ax, color="black", lw=1) data.sample(frac=0.3).to_crs(epsg=3857).plot( ax=ax, column="Crimenes", color="red", alpha=0.5, legend=True, markersize=0.01 ) data.sample(frac=0.3).to_crs(epsg=3857).plot( ax=axins, column="Crimenes", color="red", alpha=0.25, legend=True, markersize=0.1 ) ctx.add_basemap(ax, source=ctx.providers.Stamen.TonerLite) ctx.add_basemap(axins, source=ctx.providers.Stamen.TonerLite) ax.indicate_inset_zoom(axins) ax.set_xticklabels("") ax.set_yticklabels("") axins.set_xticklabels("") axins.set_yticklabels("") plt.title( "Crimenes reportados en el Área Metropolitana de Sao Paulo en 2016", fontsize=20 ) fig.show() pop_bra = [] for link in brazil_hdx_datasets: pops = up.download.hdx_dataset(link) pop_bra.append(pops) pop_bra = pd.concat(pop_bra) pop_df = up.geom.filter_population(pop_bra, amsp) amsp_hex = up.geom.gen_hexagons(6, amsp) saop_hex = up.geom.gen_hexagons(8, amsp) amsp_hex = up.geom.merge_shape_hex( amsp_hex, pop_df, {"population_2020": "sum"}, how="left", op="within" ) saop_hex = up.geom.merge_shape_hex( saop_hex, pop_df, {"population_2020": "sum"}, how="left", op="within" ) datavars = ["ANO", "MES", "Categoria", "geometry"] data1 = data[datavars] gdf_am = gpd.sjoin(amsp_hex, data1, how="left") gdf_sp = gpd.sjoin(saop_hex, data1, how="left") gdf_am.head() gdf_sp.head() print(gdf_am.shape, gdf_sp.shape) processed_am = ( gdf_am.groupby(["hex", "Categoria"]) .size() .unstack() 
.join( gdf_am[["hex", "geometry", "population_2020"]] .drop_duplicates() .set_index("hex"), how="right", ) .fillna(0) .reset_index() ) processed_sp = ( gdf_sp.groupby(["hex", "Categoria"]) .size() .unstack() .join( gdf_sp[["hex", "geometry", "population_2020"]] .drop_duplicates() .set_index("hex"), how="right", ) .fillna(0) .reset_index() ) processed_am["Crimenes"] = processed_am[ ["Homicidio", "Lesión", "Robo", "Tráfico de drogas", "Violación"] ].sum(1) processed_sp["Crimenes"] = processed_sp[ ["Homicidio", "Lesión", "Robo", "Tráfico de drogas", "Violación"] ].sum(1) processed_am = gpd.GeoDataFrame(processed_am) processed_sp = gpd.GeoDataFrame(processed_sp) processed_am.head() fig, ax = plt.subplots(1, figsize=(20, 15), dpi=500) amsp.boundary.to_crs(epsg=3857).plot(ax=ax, color="black", lw=1) saop.boundary.to_crs(epsg=3857).plot(ax=ax, color="black", lw=1) processed_am.to_crs(epsg=3857).plot( ax=ax, column="Crimenes", cmap="Spectral_r", alpha=0.5, legend=True, legend_kwds={ "location": "left", "label": "Cantidad de robos por hexágono (6)", "shrink": 0.6, }, ) axins = ax.inset_axes([0.55, -0.06, 0.6, 0.6]) axins.set(xlim=(-5.21e6, -5.175e6), ylim=(-2.715e6, -2.685e6)) saop.boundary.to_crs(epsg=3857).plot(ax=axins, color="black", lw=1) processed_sp.to_crs(epsg=3857).plot( ax=axins, column="Crimenes", cmap="Spectral_r", alpha=0.5, legend=True, legend_kwds={ "label": "Cantidad de robos por hexágono (8)", "location": "right", "shrink": 0.6, }, ) leg = axins.get_legend() plt.title( "Mapa H3 con resolución 6 y 8 del Departamento de São Paulo: Cantidad de crimenes por hexágono en 2016", fontsize=20, ) ctx.add_basemap(ax, source=ctx.providers.Stamen.TonerLite) ctx.add_basemap(axins, source=ctx.providers.Stamen.TonerLite) ax.indicate_inset_zoom(axins) ax.set_xticklabels("") ax.set_yticklabels("") axins.set_xticklabels("") axins.set_yticklabels("") fig.show()
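# A natural follow-up, sketched here under the assumption that `processed_sp` is still in memory with the `Crimenes` and `population_2020` columns built above: since the hexagons already carry population estimates, crime counts can be normalised into a per-capita rate before mapping.

# Crimes per 1,000 inhabitants in each level-8 hexagon; hexagons without population are set to 0.
rate = 1000 * processed_sp["Crimenes"] / processed_sp["population_2020"].replace(0, np.nan)
processed_sp["crimenes_per_1000"] = rate.fillna(0)
processed_sp.to_crs(epsg=3857).plot(
    column="crimenes_per_1000", cmap="Spectral_r", alpha=0.5, legend=True, figsize=(15, 10)
)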
[{"crime-data-in-brazil/BO_2016.csv": {"column_names": "[\"NUM_BO\", \"ANO_BO\", \"ID_DELEGACIA\", \"NOME_DEPARTAMENTO\", \"NOME_SECCIONAL\", \"DELEGACIA\", \"NOME_DEPARTAMENTO_CIRC\", \"NOME_SECCIONAL_CIRC\", \"NOME_DELEGACIA_CIRC\", \"ANO\", \"MES\", \"FLAG_STATUS\", \"RUBRICA\", \"DESDOBRAMENTO\", \"CONDUTA\", \"LATITUDE\", \"LONGITUDE\", \"CIDADE\", \"LOGRADOURO\", \"NUMERO_LOGRADOURO\", \"FLAG_STATUS.1\", \"Unnamed: 21\"]", "column_data_types": "{\"NUM_BO\": \"int64\", \"ANO_BO\": \"int64\", \"ID_DELEGACIA\": \"int64\", \"NOME_DEPARTAMENTO\": \"object\", \"NOME_SECCIONAL\": \"object\", \"DELEGACIA\": \"object\", \"NOME_DEPARTAMENTO_CIRC\": \"object\", \"NOME_SECCIONAL_CIRC\": \"object\", \"NOME_DELEGACIA_CIRC\": \"object\", \"ANO\": \"int64\", \"MES\": \"int64\", \"FLAG_STATUS\": \"object\", \"RUBRICA\": \"object\", \"DESDOBRAMENTO\": \"object\", \"CONDUTA\": \"object\", \"LATITUDE\": \"float64\", \"LONGITUDE\": \"float64\", \"CIDADE\": \"object\", \"LOGRADOURO\": \"object\", \"NUMERO_LOGRADOURO\": \"object\", \"FLAG_STATUS.1\": \"object\", \"Unnamed: 21\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 774662 entries, 0 to 774661\nData columns (total 22 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 NUM_BO 774662 non-null int64 \n 1 ANO_BO 774662 non-null int64 \n 2 ID_DELEGACIA 774662 non-null int64 \n 3 NOME_DEPARTAMENTO 774662 non-null object \n 4 NOME_SECCIONAL 774662 non-null object \n 5 DELEGACIA 774662 non-null object \n 6 NOME_DEPARTAMENTO_CIRC 774662 non-null object \n 7 NOME_SECCIONAL_CIRC 774662 non-null object \n 8 NOME_DELEGACIA_CIRC 774662 non-null object \n 9 ANO 774662 non-null int64 \n 10 MES 774662 non-null int64 \n 11 FLAG_STATUS 774662 non-null object \n 12 RUBRICA 774662 non-null object \n 13 DESDOBRAMENTO 30092 non-null object \n 14 CONDUTA 658523 non-null object \n 15 LATITUDE 731960 non-null float64\n 16 LONGITUDE 731959 non-null float64\n 17 CIDADE 774662 non-null object \n 18 LOGRADOURO 774660 non-null object \n 19 NUMERO_LOGRADOURO 774337 non-null object \n 20 FLAG_STATUS.1 774662 non-null object \n 21 Unnamed: 21 24 non-null object \ndtypes: float64(2), int64(5), object(15)\nmemory usage: 130.0+ MB\n", "summary": "{\"NUM_BO\": {\"count\": 774662.0, \"mean\": 288515.00654608075, \"std\": 483326.3992484536, \"min\": 1.0, \"25%\": 2000.0, \"50%\": 5282.0, \"75%\": 451696.25, \"max\": 1673679.0}, \"ANO_BO\": {\"count\": 774662.0, \"mean\": 2016.0097190774816, \"std\": 0.09810519577849644, \"min\": 2016.0, \"25%\": 2016.0, \"50%\": 2016.0, \"75%\": 2016.0, \"max\": 2017.0}, \"ID_DELEGACIA\": {\"count\": 774662.0, \"mean\": 323921.1129770145, \"std\": 415078.44680758385, \"min\": 10004.0, \"25%\": 10365.0, \"50%\": 30212.0, \"75%\": 900020.0, \"max\": 990900.0}, \"ANO\": {\"count\": 774662.0, \"mean\": 2016.0, \"std\": 0.0, \"min\": 2016.0, \"25%\": 2016.0, \"50%\": 2016.0, \"75%\": 2016.0, \"max\": 2016.0}, \"MES\": {\"count\": 774662.0, \"mean\": 6.515654569347664, \"std\": 3.4406640012293073, \"min\": 1.0, \"25%\": 4.0, \"50%\": 7.0, \"75%\": 10.0, \"max\": 12.0}, \"LATITUDE\": {\"count\": 731960.0, \"mean\": -23.572258600058962, \"std\": 0.08490143711393325, \"min\": -24.00459501, \"25%\": -23.63331643, \"50%\": -23.557159294999998, \"75%\": -23.51967196, \"max\": -21.77421214}, \"LONGITUDE\": {\"count\": 731959.0, \"mean\": -46.60945653000654, \"std\": 0.136165800780252, \"min\": -50.80017254, \"25%\": -46.69744082, \"50%\": -46.62661105, \"75%\": -46.51915416, \"max\": -45.75582715}}", 
"examples": "{\"NUM_BO\":{\"0\":3784,\"1\":3426,\"2\":6359,\"3\":1267},\"ANO_BO\":{\"0\":2016,\"1\":2016,\"2\":2016,\"3\":2016},\"ID_DELEGACIA\":{\"0\":10101,\"1\":10102,\"2\":10102,\"3\":10103},\"NOME_DEPARTAMENTO\":{\"0\":\"DECAP \",\"1\":\"DECAP \",\"2\":\"DECAP \",\"3\":\"DECAP \"},\"NOME_SECCIONAL\":{\"0\":\"DEL.SEC.1\\u00ba CENTRO \",\"1\":\"DEL.SEC.1\\u00ba CENTRO \",\"2\":\"DEL.SEC.1\\u00ba CENTRO \",\"3\":\"DEL.SEC.1\\u00ba CENTRO \"},\"DELEGACIA\":{\"0\":\"01\\u00ba D.P. SE \",\"1\":\"02\\u00ba D.P. BOM RETIRO \",\"2\":\"02\\u00ba D.P. BOM RETIRO \",\"3\":\"03\\u00ba D.P. CAMPOS ELISEOS \"},\"NOME_DEPARTAMENTO_CIRC\":{\"0\":\"DECAP \",\"1\":\"DECAP \",\"2\":\"DECAP \",\"3\":\"DECAP \"},\"NOME_SECCIONAL_CIRC\":{\"0\":\"DEL.SEC.1\\u00ba CENTRO \",\"1\":\"DEL.SEC.1\\u00ba CENTRO \",\"2\":\"DEL.SEC.1\\u00ba CENTRO \",\"3\":\"DEL.SEC.8\\u00ba SAO MATEUS \"},\"NOME_DELEGACIA_CIRC\":{\"0\":\"78\\u00ba D.P. JARDINS \",\"1\":\"03\\u00ba D.P. CAMPOS ELISEOS \",\"2\":\"03\\u00ba D.P. CAMPOS ELISEOS \",\"3\":\"49\\u00ba D.P. SAO MATEUS \"},\"ANO\":{\"0\":2016,\"1\":2016,\"2\":2016,\"3\":2016},\"MES\":{\"0\":7,\"1\":5,\"2\":10,\"3\":3},\"FLAG_STATUS\":{\"0\":\"C\",\"1\":\"C\",\"2\":\"C\",\"3\":\"C\"},\"RUBRICA\":{\"0\":\"Furto (art. 155)\",\"1\":\"Roubo (art. 157)\",\"2\":\"Drogas sem autoriza\\u00e7\\u00e3o ou em desacordo (Art.33, caput)\",\"3\":\"Roubo (art. 157)\"},\"DESDOBRAMENTO\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"CONDUTA\":{\"0\":\"TRANSEUNTE\",\"1\":\"TRANSEUNTE\",\"2\":null,\"3\":\"CARGA\"},\"LATITUDE\":{\"0\":-23.56498421,\"1\":-23.5424764,\"2\":-23.5421834,\"3\":-23.60927461},\"LONGITUDE\":{\"0\":-46.65203458,\"1\":-46.64192812,\"2\":-46.64059853,\"3\":-46.4550868},\"CIDADE\":{\"0\":\"S.PAULO \",\"1\":\"S.PAULO \",\"2\":\"S.PAULO \",\"3\":\"S.PAULO \"},\"LOGRADOURO\":{\"0\":\"AVENIDA PAULISTA \",\"1\":\"PRA\\u00c7A DA REPUBLICA \",\"2\":\"RUA CONSELHEIRO NEBIAS \",\"3\":\"RUA MADUREIRA CALHEIROS \"},\"NUMERO_LOGRADOURO\":{\"0\":\"1000\",\"1\":\"0\",\"2\":\"0\",\"3\":\"15\"},\"FLAG_STATUS.1\":{\"0\":\"C\",\"1\":\"C\",\"2\":\"C\",\"3\":\"C\"},\"Unnamed: 21\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null}}"}}]
true
1
<start_data_description><data_path>crime-data-in-brazil/BO_2016.csv: <column_names> ['NUM_BO', 'ANO_BO', 'ID_DELEGACIA', 'NOME_DEPARTAMENTO', 'NOME_SECCIONAL', 'DELEGACIA', 'NOME_DEPARTAMENTO_CIRC', 'NOME_SECCIONAL_CIRC', 'NOME_DELEGACIA_CIRC', 'ANO', 'MES', 'FLAG_STATUS', 'RUBRICA', 'DESDOBRAMENTO', 'CONDUTA', 'LATITUDE', 'LONGITUDE', 'CIDADE', 'LOGRADOURO', 'NUMERO_LOGRADOURO', 'FLAG_STATUS.1', 'Unnamed: 21'] <column_types> {'NUM_BO': 'int64', 'ANO_BO': 'int64', 'ID_DELEGACIA': 'int64', 'NOME_DEPARTAMENTO': 'object', 'NOME_SECCIONAL': 'object', 'DELEGACIA': 'object', 'NOME_DEPARTAMENTO_CIRC': 'object', 'NOME_SECCIONAL_CIRC': 'object', 'NOME_DELEGACIA_CIRC': 'object', 'ANO': 'int64', 'MES': 'int64', 'FLAG_STATUS': 'object', 'RUBRICA': 'object', 'DESDOBRAMENTO': 'object', 'CONDUTA': 'object', 'LATITUDE': 'float64', 'LONGITUDE': 'float64', 'CIDADE': 'object', 'LOGRADOURO': 'object', 'NUMERO_LOGRADOURO': 'object', 'FLAG_STATUS.1': 'object', 'Unnamed: 21': 'object'} <dataframe_Summary> {'NUM_BO': {'count': 774662.0, 'mean': 288515.00654608075, 'std': 483326.3992484536, 'min': 1.0, '25%': 2000.0, '50%': 5282.0, '75%': 451696.25, 'max': 1673679.0}, 'ANO_BO': {'count': 774662.0, 'mean': 2016.0097190774816, 'std': 0.09810519577849644, 'min': 2016.0, '25%': 2016.0, '50%': 2016.0, '75%': 2016.0, 'max': 2017.0}, 'ID_DELEGACIA': {'count': 774662.0, 'mean': 323921.1129770145, 'std': 415078.44680758385, 'min': 10004.0, '25%': 10365.0, '50%': 30212.0, '75%': 900020.0, 'max': 990900.0}, 'ANO': {'count': 774662.0, 'mean': 2016.0, 'std': 0.0, 'min': 2016.0, '25%': 2016.0, '50%': 2016.0, '75%': 2016.0, 'max': 2016.0}, 'MES': {'count': 774662.0, 'mean': 6.515654569347664, 'std': 3.4406640012293073, 'min': 1.0, '25%': 4.0, '50%': 7.0, '75%': 10.0, 'max': 12.0}, 'LATITUDE': {'count': 731960.0, 'mean': -23.572258600058962, 'std': 0.08490143711393325, 'min': -24.00459501, '25%': -23.63331643, '50%': -23.557159294999998, '75%': -23.51967196, 'max': -21.77421214}, 'LONGITUDE': {'count': 731959.0, 'mean': -46.60945653000654, 'std': 0.136165800780252, 'min': -50.80017254, '25%': -46.69744082, '50%': -46.62661105, '75%': -46.51915416, 'max': -45.75582715}} <dataframe_info> RangeIndex: 774662 entries, 0 to 774661 Data columns (total 22 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 NUM_BO 774662 non-null int64 1 ANO_BO 774662 non-null int64 2 ID_DELEGACIA 774662 non-null int64 3 NOME_DEPARTAMENTO 774662 non-null object 4 NOME_SECCIONAL 774662 non-null object 5 DELEGACIA 774662 non-null object 6 NOME_DEPARTAMENTO_CIRC 774662 non-null object 7 NOME_SECCIONAL_CIRC 774662 non-null object 8 NOME_DELEGACIA_CIRC 774662 non-null object 9 ANO 774662 non-null int64 10 MES 774662 non-null int64 11 FLAG_STATUS 774662 non-null object 12 RUBRICA 774662 non-null object 13 DESDOBRAMENTO 30092 non-null object 14 CONDUTA 658523 non-null object 15 LATITUDE 731960 non-null float64 16 LONGITUDE 731959 non-null float64 17 CIDADE 774662 non-null object 18 LOGRADOURO 774660 non-null object 19 NUMERO_LOGRADOURO 774337 non-null object 20 FLAG_STATUS.1 774662 non-null object 21 Unnamed: 21 24 non-null object dtypes: float64(2), int64(5), object(15) memory usage: 130.0+ MB <some_examples> {'NUM_BO': {'0': 3784, '1': 3426, '2': 6359, '3': 1267}, 'ANO_BO': {'0': 2016, '1': 2016, '2': 2016, '3': 2016}, 'ID_DELEGACIA': {'0': 10101, '1': 10102, '2': 10102, '3': 10103}, 'NOME_DEPARTAMENTO': {'0': 'DECAP ', '1': 'DECAP ', '2': 'DECAP ', '3': 'DECAP '}, 'NOME_SECCIONAL': {'0': 'DEL.SEC.1º CENTRO ', '1': 'DEL.SEC.1º CENTRO ', 
'2': 'DEL.SEC.1º CENTRO ', '3': 'DEL.SEC.1º CENTRO '}, 'DELEGACIA': {'0': '01º D.P. SE ', '1': '02º D.P. BOM RETIRO ', '2': '02º D.P. BOM RETIRO ', '3': '03º D.P. CAMPOS ELISEOS '}, 'NOME_DEPARTAMENTO_CIRC': {'0': 'DECAP ', '1': 'DECAP ', '2': 'DECAP ', '3': 'DECAP '}, 'NOME_SECCIONAL_CIRC': {'0': 'DEL.SEC.1º CENTRO ', '1': 'DEL.SEC.1º CENTRO ', '2': 'DEL.SEC.1º CENTRO ', '3': 'DEL.SEC.8º SAO MATEUS '}, 'NOME_DELEGACIA_CIRC': {'0': '78º D.P. JARDINS ', '1': '03º D.P. CAMPOS ELISEOS ', '2': '03º D.P. CAMPOS ELISEOS ', '3': '49º D.P. SAO MATEUS '}, 'ANO': {'0': 2016, '1': 2016, '2': 2016, '3': 2016}, 'MES': {'0': 7, '1': 5, '2': 10, '3': 3}, 'FLAG_STATUS': {'0': 'C', '1': 'C', '2': 'C', '3': 'C'}, 'RUBRICA': {'0': 'Furto (art. 155)', '1': 'Roubo (art. 157)', '2': 'Drogas sem autorização ou em desacordo (Art.33, caput)', '3': 'Roubo (art. 157)'}, 'DESDOBRAMENTO': {'0': None, '1': None, '2': None, '3': None}, 'CONDUTA': {'0': 'TRANSEUNTE', '1': 'TRANSEUNTE', '2': None, '3': 'CARGA'}, 'LATITUDE': {'0': -23.56498421, '1': -23.5424764, '2': -23.5421834, '3': -23.60927461}, 'LONGITUDE': {'0': -46.65203458, '1': -46.64192812, '2': -46.64059853, '3': -46.4550868}, 'CIDADE': {'0': 'S.PAULO ', '1': 'S.PAULO ', '2': 'S.PAULO ', '3': 'S.PAULO '}, 'LOGRADOURO': {'0': 'AVENIDA PAULISTA ', '1': 'PRAÇA DA REPUBLICA ', '2': 'RUA CONSELHEIRO NEBIAS ', '3': 'RUA MADUREIRA CALHEIROS '}, 'NUMERO_LOGRADOURO': {'0': '1000', '1': '0', '2': '0', '3': '15'}, 'FLAG_STATUS.1': {'0': 'C', '1': 'C', '2': 'C', '3': 'C'}, 'Unnamed: 21': {'0': None, '1': None, '2': None, '3': None}} <end_description>
2,512
0
4,843
2,512
69702536
<jupyter_start><jupyter_text>interventions Kaggle dataset identifier: interventions <jupyter_code>import pandas as pd df = pd.read_csv('interventions/interventions.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 656463 entries, 0 to 656462 Data columns (total 5 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 656463 non-null int64 1 nct_id 656463 non-null object 2 intervention_type 656463 non-null object 3 name 656463 non-null object 4 description 564989 non-null object dtypes: int64(1), object(4) memory usage: 25.0+ MB <jupyter_text>Examples: { "id": 12283321, "nct_id": "NCT04727996", "intervention_type": "Drug", "name": "Sitravatinib", "description": "120 mg will be administered orally once daily" } { "id": 12283322, "nct_id": "NCT04727996", "intervention_type": "Drug", "name": "Tislelizumab", "description": "200 mg will be administered intravenously (IV) once every 3 weeks" } { "id": 12283327, "nct_id": "NCT04727944", "intervention_type": "Other", "name": "Reach and grasp tasks in healthy participants using MEG technique (Experiment 1)", "description": "Human participants will perform reach and grasp movements to various objects (e.g. a cube, sphere, or rod) driven either by perceived action affordances, or instruction cues. A rotating carousel will be used to present subjects with various objects affording different types of gr...(truncated)", } { "id": 12283328, "nct_id": "NCT04727944", "intervention_type": "Other", "name": "Reach and grasp tasks in healthy participants using EEG technique (Experiment 2)", "description": "Experiment 2 consists of a task of reaching for and grasping several objects (e.g. a cube, sphere, or rod) ; the task used for experiment 2 is the same as that used for experiment 1. EEG signals will be measured." } <jupyter_script># Importing certain tables and libraries # This notebook gives a first look at the data. Before, we downloaded the tables from postgre-sql where a copy of clinical trials data has stored. Unfortunately The data stored as separate tables, therefore every variable/value has its own table and id. Since we want to assign success rates by using phase, intervention type, status of the trial we need to merge these tables. # Here I import the tables, that I have found important for calculating success and failures import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) pd.set_option("display.max_rows", 500) pd.set_option("display.max_columns", 500) trials = pd.read_csv( "../input/studiess/allstudies.csv" ) # main table for studies. Has the informtion of phase, status, time frame... 
trials.head(5)

keywords = pd.read_csv(
    "../input/keywords/keywords.csv"
)  # I guess this table has the search words for trials, to make them easy to find
keywords.head(5)

int_oth_names = pd.read_csv(
    "../input/intervetionothernames/intervention_other_names.csv"
)  # has the other name of the intervention
int_oth_names.head(5)
# this table can be helpful especially for sentiment analysis

interventions = pd.read_csv("../input/interventions/interventions.csv")
interventions.head(5)
# main table for interventions

drop_withdrawals = pd.read_csv("../input/dropwithdrawals/drop_withdrawals.csv")
drop_withdrawals.head(
    5
)  # this table has the reasons for failures, or why a drug/intervention was suspended/withdrawn

design_groups = pd.read_csv("../input/designgroups/design_groups.csv")
design_groups.head(
    5
)  # this table has the same info as the "interventions" table, except for the "group_type" column

countries = pd.read_csv("../input/countries/countries.csv")
countries.head(5)
# info about countries,
# the "removed" column means: Removed location countries, i.e. countries that appeared under listed location countries but were removed from the study record by the sponsor or investigator.

conditions = pd.read_csv("../input/conditions/conditions.csv")
conditions.head(5)  # main table for conditions/diseases

browse_interventions = pd.read_csv(
    "../input/browseinterventions/browse_interventions.csv"
)
browse_interventions.head(5)  # mesh_term of interventions

browse_conditions = pd.read_csv("../input/browseconditions/browse_conditions.csv")
browse_conditions.head(5)  # mesh_term of conditions
browse_conditions["mesh_term"].nunique()  # 4048

mesh_head = pd.read_csv("../input/datassss/mesh_headings.csv")
mesh_head.head(
    5
)  # mesh term headings; sadly this table does not have an nct_id (same for mesh_terms below), therefore it is an issue to merge it with the other tables

mesh_terms = pd.read_csv("../input/datassss/mesh_terms.csv")
mesh_terms.head(5)

sponsors = pd.read_csv("../input/sponsorparty/sponsors.csv")
sponsors.head(5)  # sponsors

res_party = pd.read_csv("../input/sponsorparty/responsible_parties.csv")
res_party.head(5)  # responsible party of the trial

# The process below, up to the next heading, is cancelled. Here I tried merging the tables first and dropping the duplicate values afterwards. But this leads to a significant loss of data: only about 20K rows remain out of over 300K. So you will see the code as markdown, since I did not activate this section.
# trials.shape # (383720, 64), 383720 observations and 64 columns
# conditions.shape # (651687, 4)
# conditions.columns
# cond_rename = conditions.rename(columns={'id': 'id_conditions', 'name': 'condition_name','downcase_name': 'condition_downcase_name' })
# merge_1 = pd.merge(trials, cond_rename, on="nct_id")
# merge_1.duplicated(subset=["nct_id"], keep='first').sum() # 268859 number of duplicates
# merge_1.head(3)
# interventions.columns # ['id', 'nct_id', 'intervention_type', 'name', 'description']
# interventions.shape # (656463, 5)
# interven_rename = interventions.rename(columns={'id': 'id_interventions', 'name': 'intervention_name','description': 'intervention_description' })
# I am renaming the columns because several tables, such as the intervention and condition tables, have a "name" column. Since the name column is common to both tables, to prevent
# confusion I coin them intervention_name and condition_name
# merge_2 = pd.merge(merge_1, interven_rename, on="nct_id")
# With a "left merge" we reference the nct_id based on the first dataframe, which is "Studies".
# This means that even if a record in studies does not have a match in the other dataframe, it is not going to be removed.
# merge_2.head(3)
# Before merging more, I will drop duplicates and some columns, because the shape of the dataframe is too big for this kernel.
# merge_3.shape # (7594259, 74)
# merge_3.duplicated(subset=["nct_id"], keep='first').sum() # 7482269
# merge_4 = merge_3.drop_duplicates(subset=['nct_id'], keep=False)
# merge_4.shape # (21029, 74)
# Significant data loss occurred: 383 720 observations shrink to 21 029 after merging the studies table with the interventions and conditions tables. Instead, let's try dropping the duplicates first and merging on the common rows afterwards.
# Checking DataFrame
# Tables of interest;
# * trials
# * keywords
# * int_oth_names
# * interventions
# * drop_withdrawals
# * design_groups
# * countries
# * conditions
# * browse_interventions
# * browse_conditions
# * mesh_head
# * mesh_terms
# * sponsors
# * res_party


def data_check(dataframe):
    print("-------- column names ------------")
    print(dataframe.columns)
    print("-------- shape before merge ---------")
    print(dataframe.shape)
    print("--------- null values -------")
    print(dataframe.isnull().sum())
    print("--------- any duplicates in NCT_ID ------")
    print(dataframe.duplicated(subset=["nct_id"], keep="first").sum())


data_check(trials)

# bringing in the important columns instead of the whole dataframe, for faster results
some_columns = [
    "nct_id",
    "start_month_year",
    "start_date_type",
    "start_date",
    "completion_month_year",
    "completion_date",
    "study_type",
    "acronym",
    "brief_title",
    "official_title",
    "overall_status",
    "last_known_status",
    "phase",
    "enrollment",
    "enrollment_type",
    "why_stopped",
    "number_of_arms",
    "source",
]
trials__ = trials[some_columns]
trials__.shape  # (383720, 18)
trials__.head()

# bringing in the trials completed between the end of 2004 and mid-2021
trials__["completion_date"] = pd.to_datetime(trials__["completion_date"])
trials_new = trials__.loc[
    (trials__["completion_date"] >= "December 30 2004")
    & (trials__["completion_date"] < "August 1 2021")
]
trials_new.shape  # (276975, 16)
min(trials_new["completion_date"])  # Timestamp('2004-12-31 00:00:00')
max(trials_new["completion_date"])  # Timestamp('2021-07-31 00:00:00')

# Basic Observations
# What is the number of completed studies per year?

pd.DatetimeIndex(trials_new["completion_date"]).year

import plotly.graph_objs as go

fig = go.Figure(
    go.Bar(
        x=trials_new.groupby(pd.DatetimeIndex(trials_new["completion_date"]).year)
        .agg("count")["nct_id"]
        .sort_values(ascending=False)
        .index,
        y=trials_new.groupby(pd.DatetimeIndex(trials_new["completion_date"]).year)
        .agg("count")["nct_id"]
        .sort_values(ascending=False)
        .values,
        textposition="outside",
    )
)
fig.update_layout(title="Studies Per Year")
fig.show()

# What is the general status of trials?

from plotly import __version__
import cufflinks as cf
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot

init_notebook_mode(connected=True)
cf.go_offline()

trials_new["overall_status"].value_counts().sort_values(ascending=True).iplot(
    kind="barh", xTitle="Count", color="green"
)

# What is the most popular study type?

labels = trials_new["study_type"].value_counts().index
values = trials_new["study_type"].value_counts().values
fig = go.Figure(
    data=[
        go.Pie(
            labels=labels,
            values=values,
            textinfo="label+percent",
            insidetextorientation="radial",
        )
    ]
)
fig.show()

# How large are Interventional Trials?
interventional_studies = trials_new[trials_new["study_type"] == "Interventional"] # Convert to numeric ## interventional_studies['enrollment'] = interventional_studies['enrollment'].astype(int) # Remove the trials with recruitment status withdrawn and terminated enrollment = interventional_studies.loc[ (interventional_studies["overall_status"] != "Withdrawn") & (interventional_studies["overall_status"] != "Terminated") ] bins = [-1, 20, 40, 60, 100, 200, 400, 600, 1000] group_names = [ "< 20", "21-40", "41-60", "61-100", "101-200", "201-400", "401-600", ">600", ] categories = pd.cut(enrollment["enrollment"], bins, labels=group_names) # Add categories as column in dataframe enrollment["Category"] = categories # View value counts enrollment_counts = enrollment["Category"].value_counts().sort_index(ascending=True) enrollment_counts.iplot( kind="bar", title="Size of Interventional Trials", colors="LightGreen" ) # What are the percentage of the phases? labels = trials_new["phase"].value_counts().drop("Not Applicable").index values = trials_new["phase"].value_counts().drop("Not Applicable").values fig = go.Figure( data=[ go.Pie( labels=labels, values=values, textinfo="label", insidetextorientation="radial", ) ] ) fig.show() # Merging tables and determining the final dataframe # [In this study](https://dspace.mit.edu/handle/1721.1/112049), they calculate the "Drug" trials success rates by separating the data as phase by phase, such as Phase 2 to Phase 3, Phase 3 to Approval or Phase 2 to Approval. They classify success as "finishing the current phase of that drug" by referencing the status of trial. For instance if a phase 2 drug's status is suspended, they classify the drug as discontinued its development which means failure. Another example is if the drug status remains "unknown" for 18 months they classified the drug as failed or failed to move phase 3. They define for Phase 3 to Approval or P2 to Approval in a same fashion. # [Here](https://www.bio.org/sites/default/files/legacy/bioorg/docs/Clinical%20Development%20Success%20Rates%202006-2015%20-%20BIO,%20Biomedtracker,%20Amplion%202016.pdf), success calculated phase by phase and phase to approval. They calculate for every disease phase transition success and likelyhood of approvals. Such as Phase1 to Phase2, or Phase1 to Approval. 
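# Before building the final dataframe below, here is a minimal sketch (not part of the
# original notebook) of how a phase-level success/failure summary in the spirit of the
# papers above could be computed from trials_new. The status strings used here
# ("Completed", "Terminated", "Withdrawn", "Suspended") are an assumption based on
# ClinicalTrials.gov conventions and should be checked against
# trials_new["overall_status"].unique() before relying on the numbers.
FAILURE_STATUSES = {"Terminated", "Withdrawn", "Suspended"}


def label_outcome(status):
    # "Completed" counts as finishing the current phase; discontinued statuses count as failure
    if status == "Completed":
        return "success"
    if status in FAILURE_STATUSES:
        return "failure"
    return "unresolved"  # e.g. still recruiting or "Unknown status"


def phase_outcome_table(df):
    out = df.copy()
    out["outcome"] = out["overall_status"].map(label_outcome)
    resolved = out[out["outcome"] != "unresolved"]
    return (
        resolved.groupby("phase")["outcome"]
        .value_counts(normalize=True)
        .unstack(fill_value=0)
        .sort_values("failure", ascending=False)
    )


# phase_outcome_table(trials_new)  # hypothetical usage; failure/success shares per phase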
trials_new.duplicated(subset=["nct_id"], keep="first").sum()
interventions.duplicated(subset=["nct_id"], keep="first").sum()  # 312 718
interventions_no_duplicates = interventions.drop_duplicates(
    subset=["nct_id"], keep=False
)
interventions_no_duplicates.shape  # (159496, 5)
interventions_no_duplicates.columns  # ['id', 'nct_id', 'intervention_type', 'name', 'description']
interventions_last_df = interventions_no_duplicates.rename(
    columns={
        "id": "id_interventions",
        "name": "intervention_name",
        "description": "intervention_description",
    }
)
int_oth_names.head(3)  # the other names of the intervention can be useful
int_oth_names.duplicated(subset=["nct_id"], keep="first").sum()  # 220469
int_oth_names_no_duplicates = int_oth_names.drop_duplicates(
    subset=["nct_id"], keep=False
)
int_oth_names_no_duplicates.shape  # (51903, 4)
keywords.sample(5)  # seems like this consists of research keywords for the search engine
keywords.duplicated(subset=["nct_id"], keep="first").sum()  # 220469
keywords_no_duplicates = keywords.drop_duplicates(subset=["nct_id"], keep=False)
keywords_no_duplicates.shape  # (38655, 4)
design_groups.head()  # has the same information as the interventions table
design_groups.duplicated(subset=["nct_id"], keep="first").sum()  # 355 989
design_groups_no_duplicates = design_groups.drop_duplicates(
    subset=["nct_id"], keep=False
)
design_groups_no_duplicates.shape  # (95851, 5)
drop_withdrawals.head()  # has the reasons for the withdrawn studies
countries.head()
countries.duplicated(subset=["nct_id"], keep="first").sum()  # 195 885
countries_no_duplicates = countries.drop_duplicates(subset=["nct_id"], keep=False)
countries_no_duplicates.shape  # (310944, 4)
browse_interventions.head(10)  # interventions browse mesh-terms
browse_interventions.duplicated(subset=["nct_id"], keep="first").sum()  # 126 300
browse_interventions_no_duplicates = browse_interventions.drop_duplicates(
    subset=["nct_id"], keep=False
)
browse_interventions_no_duplicates.shape  # (74467, 4)
browse_conditions.head(10)  # conditions browse mesh terms
browse_conditions.duplicated(subset=["nct_id"], keep="first").sum()  # 329 883
browse_conditions_no_duplicates = browse_conditions.drop_duplicates(
    subset=["nct_id"], keep=False
)
browse_conditions_no_duplicates.shape  # (143104, 4)
conditions.head(8)  # names of the conditions
conditions.duplicated(subset=["nct_id"], keep="first").sum()  # 268 859
conditions_no_duplicates = conditions.drop_duplicates(subset=["nct_id"], keep=False)
conditions_no_duplicates.shape  # (257 627, 4)
conditions_no_duplicates.columns
conditions_last_df = conditions_no_duplicates.rename(
    columns={
        "id": "id_conditions",
        "name": "condition_name",
        "downcase_name": "condition_downcase_name",
    }
)
# mesh terms do not have an nct_id
# mesh_head.head(5)  # mesh term headings
# mesh_head.duplicated(subset=["nct_id"], keep='first').sum()  # 268 859
# conditions_no_duplicates = conditions.drop_duplicates(subset=['nct_id'], keep=False)
# conditions_no_duplicates.shape  # (257 627, 4)
# mesh_terms.head(5)
# conditions.duplicated(subset=["nct_id"], keep='first').sum()  # 268 859
# conditions_no_duplicates = conditions.drop_duplicates(subset=['nct_id'], keep=False)
# conditions_no_duplicates.shape  # (257 627, 4)
sponsors.head()
sponsors.duplicated(subset=["nct_id"], keep="first").sum()  # 230 392
sponsors_no_duplicates = sponsors.drop_duplicates(subset=["nct_id"], keep=False)
sponsors_no_duplicates.shape  # (255 921, 5)
sponsors_no_duplicates.columns  # ['id', 'nct_id', 'agency_class', 'lead_or_collaborator', 'name']
sponsors_last_df = sponsors_no_duplicates.rename(
    columns={
        "id": "id_sponsor",
        "agency_class": "sponsor_agency_class",
        "lead_or_collaborator": "sponsor_lead_or_collaborator",
        "name": "sponsor_name",
    }
)
res_party.head(5)
res_party.duplicated(subset=["nct_id"], keep="first").sum()  # 0 duplicates
res_party.columns  # ['id', 'nct_id', 'responsible_party_type', 'name', 'title', 'organization', 'affiliation']
res_party_last_df = res_party.rename(
    columns={
        "id": "id_responsible_party",
        "name": "responsible_party_name",
        "title": "responsible_party_title",
        "organization": "responsible_party_organization",
        "affiliation": "responsible_party_affiliation",
    }
)
outcome_analysis_raw = pd.read_csv("../input/outcome-analyses/outcome_analyses.csv")
outcome_analysis_raw.columns
outcome_analysis_raw.head(10)
cols_in_outcome_analysis = ["nct_id", "p_value", "p_value_description", "method"]
outcome_analysis = outcome_analysis_raw[cols_in_outcome_analysis]
outcome_analysis.shape  # (213198, 4)
outcome_analysis.duplicated(subset=["nct_id"], keep="first").sum()  # 195674
outcome_analysis_no_duplicates = outcome_analysis.drop_duplicates(
    subset=["nct_id"], keep=False
)
outcome_analysis_no_duplicates.shape
merge_1 = pd.merge(trials_new, conditions_last_df, on="nct_id")
merge_1.shape  # (192398, 21)
merge_2 = pd.merge(merge_1, interventions_last_df, on="nct_id")
merge_2.shape  # (78823, 25)
merge_2.columns
merge_3 = pd.merge(merge_2, sponsors_last_df, on="nct_id")
merge_3.shape  # (55582, 29)
merge_3.columns
merge_4 = pd.merge(merge_3, res_party_last_df, on="nct_id")
merge_4.shape  # (52578, 35)
merge_4.columns
# merge_5 = pd.merge(merge_4, outcome_analysis, on="nct_id")
# merge_5.shape  # (13604, 38)  # decreases the size of the data tremendously
merge_4.duplicated(subset=["nct_id"], keep="first").sum()  # 0 duplicates
dataframe = merge_4.copy()
dataframe.sample(3)
dataframe.to_csv("./dataframe.csv")
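# The dedup-rename-merge steps above repeat the same three operations for every
# auxiliary table. A small helper like the sketch below (an addition, not code from the
# original notebook) keeps those steps consistent and makes it easy to add or remove
# tables later; the rename maps are the same ones used above.
def dedup_rename_merge(base, table, rename_map, key="nct_id"):
    """Drop every key that occurs more than once, rename columns, then inner-merge."""
    deduped = table.drop_duplicates(subset=[key], keep=False).rename(columns=rename_map)
    return pd.merge(base, deduped, on=key)


# Hypothetical usage, equivalent to merge_1 .. merge_4 above:
# dataframe = trials_new
# for table, rename_map in [
#     (conditions, {"id": "id_conditions", "name": "condition_name", "downcase_name": "condition_downcase_name"}),
#     (interventions, {"id": "id_interventions", "name": "intervention_name", "description": "intervention_description"}),
#     (sponsors, {"id": "id_sponsor", "agency_class": "sponsor_agency_class", "lead_or_collaborator": "sponsor_lead_or_collaborator", "name": "sponsor_name"}),
#     (res_party, {"id": "id_responsible_party", "name": "responsible_party_name", "title": "responsible_party_title", "organization": "responsible_party_organization", "affiliation": "responsible_party_affiliation"}),
# ]:
#     dataframe = dedup_rename_merge(dataframe, table, rename_map)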
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/702/69702536.ipynb
interventions
ilkeakar
[{"Id": 69702536, "ScriptId": 18876912, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6880870, "CreationDate": "08/02/2021 21:00:49", "VersionNumber": 13.0, "Title": "preparing the data", "EvaluationDate": "08/02/2021", "IsChange": false, "TotalLines": 355.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 355.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 93175233, "KernelVersionId": 69702536, "SourceDatasetVersionId": 2447533}, {"Id": 93175232, "KernelVersionId": 69702536, "SourceDatasetVersionId": 2447370}]
[{"Id": 2447533, "DatasetId": 1481177, "DatasourceVersionId": 2489860, "CreatorUserId": 6880870, "LicenseName": "Unknown", "CreationDate": "07/21/2021 08:25:32", "VersionNumber": 1.0, "Title": "interventions", "Slug": "interventions", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1481177, "CreatorUserId": 6880870, "OwnerUserId": 6880870.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2447533.0, "CurrentDatasourceVersionId": 2489860.0, "ForumId": 1500847, "Type": 2, "CreationDate": "07/21/2021 08:25:32", "LastActivityDate": "07/21/2021", "TotalViews": 1364, "TotalDownloads": 6, "TotalVotes": 1, "TotalKernels": 2}]
[{"Id": 6880870, "UserName": "ilkeakar", "DisplayName": "ilkeakar", "RegisterDate": "03/07/2021", "PerformanceTier": 0}]
[{"interventions/interventions.csv": {"column_names": "[\"id\", \"nct_id\", \"intervention_type\", \"name\", \"description\"]", "column_data_types": "{\"id\": \"int64\", \"nct_id\": \"object\", \"intervention_type\": \"object\", \"name\": \"object\", \"description\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 656463 entries, 0 to 656462\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 656463 non-null int64 \n 1 nct_id 656463 non-null object\n 2 intervention_type 656463 non-null object\n 3 name 656463 non-null object\n 4 description 564989 non-null object\ndtypes: int64(1), object(4)\nmemory usage: 25.0+ MB\n", "summary": "{\"id\": {\"count\": 656463.0, \"mean\": 12727239.709438307, \"std\": 270817.23013447033, \"min\": 12283321.0, \"25%\": 12517311.5, \"50%\": 12698393.0, \"75%\": 12868857.5, \"max\": 13298466.0}}", "examples": "{\"id\":{\"0\":12283321,\"1\":12283322,\"2\":12283327,\"3\":12283328},\"nct_id\":{\"0\":\"NCT04727996\",\"1\":\"NCT04727996\",\"2\":\"NCT04727944\",\"3\":\"NCT04727944\"},\"intervention_type\":{\"0\":\"Drug\",\"1\":\"Drug\",\"2\":\"Other\",\"3\":\"Other\"},\"name\":{\"0\":\"Sitravatinib\",\"1\":\"Tislelizumab\",\"2\":\"Reach and grasp tasks in healthy participants using MEG technique (Experiment 1)\",\"3\":\"Reach and grasp tasks in healthy participants using EEG technique (Experiment 2)\"},\"description\":{\"0\":\"120 mg will be administered orally once daily\",\"1\":\"200 mg will be administered intravenously (IV) once every 3 weeks\",\"2\":\"Human participants will perform reach and grasp movements to various objects (e.g. a cube, sphere, or rod) driven either by perceived action affordances, or instruction cues. A rotating carousel will be used to present subjects with various objects affording different types of grasps (e.g. a precision pinch, a whole hand 'power' grasp, or a tripod grasp).\\r\\nPrior to the experiment, subjects will be tested outside the scanner by asking them to grasp each object as they would naturally to ensure that each object elicits the expected grasp type. Subject-specific, 3D-printed head-casts will be created based on high resolution MRI scans from each subject, and worn by subjects during the MEG experiment (Experiment 1) to reduce within-session head movement associated with reaching and grasping.\",\"3\":\"Experiment 2 consists of a task of reaching for and grasping several objects (e.g. a cube, sphere, or rod) ; the task used for experiment 2 is the same as that used for experiment 1. EEG signals will be measured.\"}}"}}]
true
15
<start_data_description><data_path>interventions/interventions.csv: <column_names> ['id', 'nct_id', 'intervention_type', 'name', 'description'] <column_types> {'id': 'int64', 'nct_id': 'object', 'intervention_type': 'object', 'name': 'object', 'description': 'object'} <dataframe_Summary> {'id': {'count': 656463.0, 'mean': 12727239.709438307, 'std': 270817.23013447033, 'min': 12283321.0, '25%': 12517311.5, '50%': 12698393.0, '75%': 12868857.5, 'max': 13298466.0}} <dataframe_info> RangeIndex: 656463 entries, 0 to 656462 Data columns (total 5 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 656463 non-null int64 1 nct_id 656463 non-null object 2 intervention_type 656463 non-null object 3 name 656463 non-null object 4 description 564989 non-null object dtypes: int64(1), object(4) memory usage: 25.0+ MB <some_examples> {'id': {'0': 12283321, '1': 12283322, '2': 12283327, '3': 12283328}, 'nct_id': {'0': 'NCT04727996', '1': 'NCT04727996', '2': 'NCT04727944', '3': 'NCT04727944'}, 'intervention_type': {'0': 'Drug', '1': 'Drug', '2': 'Other', '3': 'Other'}, 'name': {'0': 'Sitravatinib', '1': 'Tislelizumab', '2': 'Reach and grasp tasks in healthy participants using MEG technique (Experiment 1)', '3': 'Reach and grasp tasks in healthy participants using EEG technique (Experiment 2)'}, 'description': {'0': '120 mg will be administered orally once daily', '1': '200 mg will be administered intravenously (IV) once every 3 weeks', '2': "Human participants will perform reach and grasp movements to various objects (e.g. a cube, sphere, or rod) driven either by perceived action affordances, or instruction cues. A rotating carousel will be used to present subjects with various objects affording different types of grasps (e.g. a precision pinch, a whole hand 'power' grasp, or a tripod grasp).\r\nPrior to the experiment, subjects will be tested outside the scanner by asking them to grasp each object as they would naturally to ensure that each object elicits the expected grasp type. Subject-specific, 3D-printed head-casts will be created based on high resolution MRI scans from each subject, and worn by subjects during the MEG experiment (Experiment 1) to reduce within-session head movement associated with reaching and grasping.", '3': 'Experiment 2 consists of a task of reaching for and grasping several objects (e.g. a cube, sphere, or rod) ; the task used for experiment 2 is the same as that used for experiment 1. EEG signals will be measured.'}} <end_description>
5,131
0
5,794
5,131
69702018
# # Introduction
# The sinking of the Titanic is one of the most notorious shipwrecks in history. In 1912,
# during her maiden voyage, the Titanic sank after colliding with an iceberg, killing
# 1502 out of 2224 passengers and crew.
# # Content:
# 1. [Load and Check Data](#1)
# 2. [Variable Description](#2)
#     * [Univariate Variable Analysis](#3)
#         * [Categorical Variable](#4)
#         * [Numerical Variable](#5)
# 3. [Basic Data Analysis](#6)
# 4. [Outlier Detection](#7)
# 5. [Missing Value](#8)
#     * [Find Missing Value](#9)
#     * [Fill Missing Value](#10)
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt

plt.style.use("seaborn-whitegrid")
import seaborn as sns
from collections import Counter
import warnings

warnings.filterwarnings("ignore")
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved
# as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of
# the current session
# plt.style.available
# a = [1,2,3,4]
# plt.plot(a)
# plt.show()
# # # Load and Check Data
train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
test_PassangerId = test_df["PassengerId"]
train_df.columns
train_df.head()
train_df.describe()
# # # Variable Description
# 1. PassengerId: unique id number of each passenger
# 1. Survived: passenger survived (1) or died (0)
# 1. Pclass: passenger class
# 1. Name: name
# 1. Sex: gender of the passenger
# 1. Age: age of the passenger
# 1. SibSp: number of siblings/spouses
# 1. Parch: number of parents/children
# 1. Ticket: ticket number
# 1. Fare: amount of money spent on the ticket
# 1. Cabin: cabin category
# 1. Embarked: port where the passenger embarked (C = Cherbourg, Q = Queenstown, S = Southampton)
train_df.info()
# * float64(2): Fare and Age
# * int64(5): Pclass, SibSp, Parch, PassengerId and Survived
# * object(5): Cabin, Embarked, Ticket, Name and Sex
# # Univariate Variable Analysis
# * Categorical Variable: Survived, Sex, Pclass, Embarked, Cabin, Name, SibSp and Parch
# * Numerical Variable: Age, PassengerId and Fare
# ## Categorical Variable
def bar_plot(variable):
    """
    input: variable, e.g. "Sex"
    output: bar plot & value count
    """
    # get feature
    var = train_df[variable]
    # count the number of each categorical value/sample
    varValue = var.value_counts()
    # visualize
    plt.figure(figsize=(9, 3))
    plt.bar(varValue.index, varValue)
    plt.xticks(varValue.index, varValue.index.values)
    plt.ylabel("Frequency")
    plt.title(variable)
    plt.show()
    print("{}: \n {}".format(variable, varValue))


category1 = ["Survived", "Sex", "Pclass", "Embarked", "SibSp", "Parch"]
for c in category1:
    bar_plot(c)
category2 = ["Cabin", "Name", "Ticket"]
for c in category2:
    print("{} \n".format(train_df[c].value_counts()))


# # ## Numerical Variable
def plot_hist(variable):
    plt.figure(figsize=(9, 3))
    plt.hist(train_df[variable], bins=50)
    plt.xlabel(variable)
    plt.ylabel("Frequency")
    plt.title("{} distribution with hist".format(variable))
    plt.show()


numericVar = ["Fare", "Age", "PassengerId"]
for n in numericVar:
    plot_hist(n)
# # # Basic Data Analysis
# * Pclass - Survived
# * Sex - Survived
# * SibSp - Survived
# * Parch - Survived
# train_df[["Pclass", "Survived"]]  # Pclass vs Survived
# train_df[["Pclass", "Survived"]].groupby(["Pclass"], as_index = False).mean()
# Pclass vs Survived
train_df[["Pclass", "Survived"]].groupby(["Pclass"], as_index=False).mean().sort_values(
    by="Survived", ascending=False
)
# Sex vs Survived
train_df[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean().sort_values(
    by="Survived", ascending=False
)
# SibSp vs Survived
train_df[["SibSp", "Survived"]].groupby(["SibSp"], as_index=False).mean().sort_values(
    by="Survived", ascending=False
)
# Parch vs Survived
train_df[["Parch", "Survived"]].groupby(["Parch"], as_index=False).mean().sort_values(
    by="Survived", ascending=False
)


# # # Outlier Detection
def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        # 1st quartile
        Q1 = np.percentile(df[c], 25)
        # 3rd quartile
        Q3 = np.percentile(df[c], 75)
        # IQR
        IQR = Q3 - Q1
        # Outlier step
        outlier_step = IQR * 1.5
        # detect outliers and their indices
        outlier_list_col = df[
            (df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)
        ].index
        # store indices
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list(i for i, v in outlier_indices.items() if v > 2)
    return multiple_outliers


# a = ["a", "a", "a", "a", "b", "b"]
# Counter(a)
train_df.loc[detect_outliers(train_df, ["Age", "SibSp", "Parch", "Fare"])]
# drop outliers
train_df = train_df.drop(
    detect_outliers(train_df, ["Age", "SibSp", "Parch", "Fare"]), axis=0
).reset_index(drop=True)
# # # Missing Value
# * Find Missing Value
# * Fill Missing Value
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.head()
# # ## Find Missing Value
# train_df.columns
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
# # ## Fill Missing Value
# * Embarked has 2 missing values
# * Fare has only 1 missing value
train_df[train_df["Embarked"].isnull()]
train_df.boxplot(column="Fare", by="Embarked")
plt.show()
# the Fare boxplot suggests these two passengers embarked at "C" (note the capital letter used in the data)
train_df["Embarked"] = train_df["Embarked"].fillna("C")
train_df[train_df["Embarked"].isnull()]
train_df[train_df["Fare"].isnull()]
train_df["Fare"] = train_df["Fare"].fillna(
    np.mean(train_df[train_df["Pclass"] == 3]["Fare"])
)
train_df[train_df["Fare"].isnull()]
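# train_df_len was stored before the concat above so that the combined frame can be
# split back into train and test once the missing values are handled. The lines below
# are a minimal sketch of that split (an assumed next step, not code from the original
# notebook); the appended test rows have NaN in "Survived".
test = train_df[train_df_len:].drop(labels=["Survived"], axis=1)
train = train_df[:train_df_len]
X_train = train.drop(labels="Survived", axis=1)
y_train = train["Survived"]
print(X_train.shape, y_train.shape, test.shape)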
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/702/69702018.ipynb
null
null
[{"Id": 69702018, "ScriptId": 12091500, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4413808, "CreationDate": "08/02/2021 20:57:23", "VersionNumber": 3.0, "Title": "Titanic EDA", "EvaluationDate": "08/02/2021", "IsChange": false, "TotalLines": 233.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 233.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
null
null
null
null
false
0
2,101
2
2,101
2,101
69702937
<jupyter_start><jupyter_text>EfficientnetWeights Kaggle dataset identifier: efficientnetweights <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os # for dirname, _, filenames in os.walk('/kaggle/input'): # for filename in filenames: # print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import tensorflow as tf import os try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection print("Running on TPU ", tpu.cluster_spec().as_dict()["worker"]) tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) except ValueError: print("Not connected to a TPU runtime. Using CPU/GPU strategy") strategy = tf.distribute.MirroredStrategy() strategy.scope() # !conda install '/kaggle/input/pydicom-conda-helper/libjpeg-turbo-2.1.0-h7f98852_0.tar.bz2' -c conda-forge -y # !conda install '/kaggle/input/pydicom-conda-helper/libgcc-ng-9.3.0-h2828fa1_19.tar.bz2' -c conda-forge -y # !conda install '/kaggle/input/pydicom-conda-helper/gdcm-2.8.9-py37h500ead1_1.tar.bz2' -c conda-forge -y # !conda install '/kaggle/input/pydicom-conda-helper/conda-4.10.1-py37h89c1867_0.tar.bz2' -c conda-forge -y # !conda install '/kaggle/input/pydicom-conda-helper/certifi-2020.12.5-py37h89c1867_1.tar.bz2' -c conda-forge -y # !conda install '/kaggle/input/pydicom-conda-helper/openssl-1.1.1k-h7f98852_0.tar.bz2' -c conda-forge -y # !pip install efficientnet # import efficientnet.keras as efn import os import shutil import random import tensorflow as tf from tensorflow.keras.layers import ( Dense, Conv2D, Dropout, MaxPooling2D, AveragePooling2D, Flatten, Activation, BatchNormalization, Concatenate, ) from tensorflow.keras.models import Sequential from tensorflow.keras.applications import EfficientNetB7 from tensorflow.keras.optimizers import Adam, SGD, Adadelta import numpy as np import cv2 import matplotlib.pyplot as plt import pandas as pd from tensorflow.keras.preprocessing.image import ImageDataGenerator random.seed(7) preprocess_input = tf.keras.applications.efficientnet.preprocess_input # base_model = EfficientNetB7(weights='imagenet',include_top=False,input_shape=(600,600,3)) # base_model.trainable = False # base_model.summary() a = os.listdir("../input/efficientnetb7-dataset-augmented/real/train") a # for i in a: # os.mkdir('NewPreprocessed/{}'.format(i)) shutil.copytree( "../input/efficientnetb7-dataset-augmented/real/train", "/kaggle/tmp/NewPreprocessed", ) # from PIL import Image # import os, sys # def resize(path,savepath): # for item in os.listdir(path): # item_path = path+"/"+item # im = Image.open(item_path) # imResize = im.resize((600,600), Image.ANTIALIAS) # imResize.save(savepath +"/"+item.split('.')[0]+ '_resized.jpg', 'JPEG') # for i in a: # path = "../input/rsna-balenced-dataset/Processed/"+i # savepath = "NewPreprocessed/"+i # resize(path,savepath) for i in a: savepath = "NewPreprocessed/" print(len(os.listdir(savepath + i))) split_size = 0.9 def split_data(SOURCE, TRAINING, VALIDATION, SPLIT_SIZE): 
all_images = os.listdir(SOURCE) print(type(all_images)) random.shuffle(all_images) splitting_index = round(SPLIT_SIZE * len(all_images)) # print(splitting_index) # print(splitting_index+portion) train_images = all_images[:splitting_index] valid_images = all_images[splitting_index:] for img in train_images: shutil.copy(os.path.join(SOURCE, img), TRAINING) for img in valid_images: shutil.copy(os.path.join(SOURCE, img), VALIDATION) os.mkdir("train") os.mkdir("val") for i in a: source = "NewPreprocessed/" + i train = "train/" + i val = "val/" + i os.mkdir(train) os.mkdir(val) split_data(source, train, val, split_size) for i in a: train = "train/" + i val = "val/" + i print(len(os.listdir(train)), len(os.listdir(val))) batch_size = 64 # TRAINING_DIR = 'train' # train_datagen = tf.keras.preprocessing.image.ImageDataGenerator() # train_generator = train_datagen.flow_from_directory(TRAINING_DIR, # target_size=(600,600), # batch_size=16, # class_mode='categorical', # shuffle = True) # VALIDATION_DIR = 'val' # validation_datagen = ImageDataGenerator() # validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR, # target_size=(600,600), # batch_size=16, # class_mode='categorical', # shuffle = True) TRAINING_DIR = "train" train_ds = tf.keras.preprocessing.image_dataset_from_directory( TRAINING_DIR, shuffle=True, image_size=(600, 600), batch_size=batch_size, label_mode="categorical", ) VALIDATION_DIR = "val" val_ds = tf.keras.preprocessing.image_dataset_from_directory( VALIDATION_DIR, shuffle=True, image_size=(600, 600), label_mode="categorical", batch_size=batch_size, ) class_names = train_ds.class_names class_names # plt.figure(figsize=(10, 10)) # for images, labels in train_ds.take(1): # for i in range(9): # ax = plt.subplot(3, 3, i + 1) # plt.imshow(images[i].numpy().astype("uint8")) # plt.title(class_names[labels[i]]) # plt.axis("off") AUTOTUNE = tf.data.AUTOTUNE train_ds = train_ds.prefetch(buffer_size=AUTOTUNE) val_ds = val_ds.prefetch(buffer_size=AUTOTUNE) # base_model = EfficientNetB7(weights='imagenet',include_top=False,input_shape=(600,600,3)) # base_model.trainable = False # for i in base_model.layers[-10:]: # i.trainable = True # print(i.trainable) # for i in base_model.layers[-20:]: # print(i.trainable) def create_model(): inputs = tf.keras.Input(shape=(600, 600, 3)) preprocess_input = tf.keras.applications.efficientnet.preprocess_input data_augmentation = tf.keras.Sequential( [ tf.keras.layers.experimental.preprocessing.RandomRotation(0.3), ] ) # global_average_layer = tf.keras.layers.GlobalAveragePooling2D() # prediction_layer2 = tf.keras.layers.Dense(512,activation="relu") prediction_2ndlast = tf.keras.layers.Dense(16, activation="relu") prediction_final = tf.keras.layers.Dense(4, activation="softmax") avg_pool = AveragePooling2D(pool_size=(2, 2), padding="same") flat = Flatten() x = data_augmentation(inputs) x = preprocess_input(x) base_model = EfficientNetB7(weights="imagenet", include_top=False, input_tensor=x) base_model.trainable = False # for layer in base_model.layers[:]: # if not isinstance(layer, tf.keras.layers.BatchNormalization): # layer.trainable = True x = avg_pool(base_model.output) # 10*10*2560 x = flat(x) # 256,000 a, b, c, d, e, f, g, h, i, j = tf.split( x, num_or_size_splits=10, axis=1 ) # 25,600 each path1 = tf.keras.Sequential( [ tf.keras.layers.Dense(256, activation="relu"), tf.keras.layers.Dense(16, activation="relu"), tf.keras.layers.Dense(4, activation="relu"), ] ) # sudden drop path2 = tf.keras.Sequential( [ tf.keras.layers.Dense(512, 
activation="relu"), tf.keras.layers.Dense(48, activation="relu"), tf.keras.layers.Dense(8, activation="relu"), tf.keras.layers.Dense(4, activation="relu"), ] ) # slower drop path3 = tf.keras.Sequential( [ tf.keras.layers.Dense(512, activation="relu"), tf.keras.layers.Dense(128, activation="relu"), tf.keras.layers.Dense(64, activation="relu"), tf.keras.layers.Dense(16, activation="relu"), tf.keras.layers.Dense(4, activation="relu"), ] ) # slowest drop path4 = tf.keras.Sequential( [ tf.keras.layers.Dense(256, activation="relu"), tf.keras.layers.Dense(32, activation="relu"), tf.keras.layers.Dense(4, activation="relu"), ] ) # my opinion:ideal drop a = path1(a) b = path2(b) c = path3(c) d = path4(d) e = path1(e) f = path2(f) g = path3(g) h = path4(h) i = path4(i) j = path4(j) x = Concatenate(axis=1)([a, b, c, d, e, f, g, h, i, j]) x = prediction_2ndlast(x) outputs = prediction_final(x) model = tf.keras.Model(inputs, outputs) return model initial_learning_rate = 0.5 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate, decay_steps=2000, decay_rate=0.1, staircase=False ) with strategy.scope(): Model = create_model() optimizer = Adam(learning_rate=lr_schedule) Model.compile( loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"] ) # Model = tf.keras.models.load_model("/kaggle/input/test-unbal/my_model.h5") history = Model.fit(train_ds, epochs=100, batch_size=batch_size, validation_data=val_ds) Model.save("../working/weights/my_model.h5") Model.summary() # Model.evaluate(val_ds, batch_size=128) # with strategy.scope(): # Model = create_model() # optimizer = SGD(learning_rate=lr_schedule) # Model.compile(loss='categorical_crossentropy',optimizer=optimizer,metrics=['accuracy']) # Model = tf.keras.models.load_model("../input/efficientnetb7/weights/my_model.h5") # history = Model.fit(train_ds,epochs=10,batch_size=batch_size,validation_data=val_ds) # Model.save('../working/weights/my_model.h5') # with strategy.scope(): # Model = create_model() # optimizer = Adadelta(learning_rate=lr_schedule) # Model.compile(loss='categorical_crossentropy',optimizer=optimizer,metrics=['accuracy']) # Model = tf.keras.models.load_model("../input/efficientnetb7/weights/my_model.h5") # history = Model.fit(train_ds,epochs=6,batch_size=batch_size,validation_data=val_ds) # Model.save('../working/weights/my_model.h5') print(Model.input_shape, Model.output_shape) # Model.summary() # history = Model.fit(train_generator,epochs=40,batch_size=16,validation_data=validation_generator) # print(len(base_model.layers)) # with strategy.scope(): # os.mkdir("../working/weights") # Model.save_weights("../working/weights/weights.pt") # Loading and saving # os.rmdir("../working/weights") # Model = tf.keras.models.load_model("../input/efficientnetb7/weights/my_model.h5") # history = Model.fit(train_ds,epochs=25,batch_size=16,validation_data=val_ds) # Model = tf.keras.models.load_model("/kaggle/input/efficientnetweights/weights/my_model.h5") # Model = tf.keras.models.load_model("/kaggle/input/efnetb7-layers-increased-more-trainable-layers/weights/my_model.h5") # Model = create_model() # Model.load_weights("../input/efficientnetweights/weights/") # Model.evaluate(val_ds) # def unfreeze_model(Model): # # We unfreeze the top 20 layers while leaving BatchNorm layers frozen # for layer in Model.layers[-30:]: # if not isinstance(layer, tf.keras.layers.BatchNormalization): # layer.trainable = True # optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001) # Model.compile( # optimizer=optimizer, 
loss="categorical_crossentropy", metrics=["accuracy"] # ) # unfreeze_model(Model) # epochs = 10 # @param {type: "slider", min:8, max:50} # hist = Model.fit(train_ds, epochs=epochs,batch_size=batch_size, validation_data=val_ds) # plot_hist(hist) # Model.layers
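# The commented-out unfreeze_model above sketches the intended fine-tuning pass: open
# the last few layers of the network (keeping BatchNormalization layers frozen) and
# continue training at a much lower learning rate. Below is a minimal runnable version
# of that idea; the layer count, learning rate and epoch count are assumptions to tune,
# not values from the original run.
def unfreeze_top_layers(model, n_layers=30, learning_rate=1e-4):
    for layer in model.layers[-n_layers:]:
        # BatchNorm statistics stay frozen so fine-tuning remains stable
        if not isinstance(layer, tf.keras.layers.BatchNormalization):
            layer.trainable = True
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model


# Hypothetical usage after the initial frozen-backbone training above:
# with strategy.scope():
#     Model = unfreeze_top_layers(Model)
# fine_tune_history = Model.fit(train_ds, epochs=10, validation_data=val_ds)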
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/702/69702937.ipynb
efficientnetweights
ajinkyadeshpande39
[{"Id": 69702937, "ScriptId": 18881977, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5688091, "CreationDate": "08/02/2021 21:03:48", "VersionNumber": 10.0, "Title": "Efficientnetb7", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 360.0, "LinesInsertedFromPrevious": 68.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 292.0, "LinesInsertedFromFork": 136.0, "LinesDeletedFromFork": 67.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 224.0, "TotalVotes": 0}]
[{"Id": 93175718, "KernelVersionId": 69702937, "SourceDatasetVersionId": 2451127}, {"Id": 93175716, "KernelVersionId": 69702937, "SourceDatasetVersionId": 2434198}, {"Id": 93175717, "KernelVersionId": 69702937, "SourceDatasetVersionId": 2445158}]
[{"Id": 2451127, "DatasetId": 1461496, "DatasourceVersionId": 2493477, "CreatorUserId": 7528647, "LicenseName": "Unknown", "CreationDate": "07/22/2021 09:38:12", "VersionNumber": 7.0, "Title": "EfficientnetWeights", "Slug": "efficientnetweights", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Data Update 2021/07/22", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1461496, "CreatorUserId": 7528647, "OwnerUserId": 7528647.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2451127.0, "CurrentDatasourceVersionId": 2493477.0, "ForumId": 1481092, "Type": 2, "CreationDate": "07/11/2021 16:22:24", "LastActivityDate": "07/11/2021", "TotalViews": 1296, "TotalDownloads": 3, "TotalVotes": 1, "TotalKernels": 4}]
[{"Id": 7528647, "UserName": "ajinkyadeshpande39", "DisplayName": "AJINKYA DESHPANDE", "RegisterDate": "05/28/2021", "PerformanceTier": 1}]
"slider", min:8, max:50} # hist = Model.fit(train_ds, epochs=epochs,batch_size=batch_size, validation_data=val_ds) # plot_hist(hist) # Model.layers
false
0
3,889
0
3,910
3,889
69702157
<jupyter_start><jupyter_text>Handwriting Recognition ### Overview This dataset consists of more than four hundred thousand handwritten names collected through charity projects. Character Recognition utilizes image processing technologies to convert characters on scanned documents into digital forms. It typically performs well in machine-printed fonts. However, it still poses difficult challenges for machines to recognize handwritten characters, because of the huge variation in individual writing styles. There are 206,799 first names and 207,024 surnames in total. The data was divided into a training set (331,059), testing set (41,382), and validation set (41,382) respectively. ### Content The input data here are hundreds of thousands of images of handwritten names. In the Data, you’ll find the transcribed images broken up into test, training, and validation sets. Image Lable follow the following naming format enabling you to extend the data set with your own data. | Image | URL | | | | | | --- | --- | --- | --- | --- | --- | | D2M | 15 | 0010079F | 0002 | 1 | first name.jpg | | D2M | 15 | 0010079F | 0002 | 1 | surname.jpg | | D2M | 15 | 0010079F | 0003 | 2 | surname.jpg | | D2M | 15 | 0010079F | 0004 | 3 | first name.jpg | | D2M | 15 | 0010079F | 0004 | 3 | surname.jpg | | D2M | 15 | 0010079F | 0005 | 4 | first name.jpg | | D2M | 15 | 0010079F | 0006 | 5 | first name.jpg | | D2M | 15 | 0010079F | 0006 | 5 | surname.jpg | | D2M | 15 | 0010079F | 0007 | 6 | first name.jpg | ### Inspiration The Inspiration of this is to explore the task of classifying handwritten text and to convert handwritten text into the digital format using various approaches out there Kaggle dataset identifier: handwriting-recognition <jupyter_script>import os import cv2 import random import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf from keras import backend as K from keras.models import Model from keras.layers import ( Input, Conv2D, MaxPooling2D, Reshape, Bidirectional, LSTM, Dense, Lambda, Activation, BatchNormalization, Dropout, ) from keras.optimizers import Adam train = pd.read_csv("/kaggle/input/handwriting-recognition/written_name_train_v2.csv") valid = pd.read_csv( "/kaggle/input/handwriting-recognition/written_name_validation_v2.csv" ) plt.figure(figsize=(15, 10)) for i in range(6): ax = plt.subplot(2, 3, i + 1) img_dir = ( "/kaggle/input/handwriting-recognition/train_v2/train/" + train.loc[i, "FILENAME"] ) image = cv2.imread(img_dir, cv2.IMREAD_GRAYSCALE) plt.imshow(image, cmap="gray") plt.title(train.loc[i, "IDENTITY"], fontsize=12) plt.axis("off") plt.subplots_adjust(wspace=0.2, hspace=-0.8) # ## Cleaning Data print("Number of NaNs in train set : ", train["IDENTITY"].isnull().sum()) print("Number of NaNs in validation set : ", valid["IDENTITY"].isnull().sum()) train.dropna(axis=0, inplace=True) valid.dropna(axis=0, inplace=True) # Also, there are some images in our data with the label 'UNREADABLE'. Lets check those images and remove them. 
unreadable = train[train["IDENTITY"] == "UNREADABLE"] unreadable.reset_index(inplace=True, drop=True) plt.figure(figsize=(15, 10)) for i in range(6): ax = plt.subplot(2, 3, i + 1) img_dir = ( "/kaggle/input/handwriting-recognition/train_v2/train/" + unreadable.loc[i, "FILENAME"] ) image = cv2.imread(img_dir, cv2.IMREAD_GRAYSCALE) plt.imshow(image, cmap="gray") plt.title(unreadable.loc[i, "IDENTITY"], fontsize=12) plt.axis("off") plt.subplots_adjust(wspace=0.2, hspace=-0.8) train = train[train["IDENTITY"] != "UNREADABLE"] valid = valid[valid["IDENTITY"] != "UNREADABLE"] # There are some labels which are in lowercase. To maintain uniformity in the labels, I convert all the labels to uppercase. train["IDENTITY"] = train["IDENTITY"].str.upper() valid["IDENTITY"] = valid["IDENTITY"].str.upper() # Reset the index and we are done with cleaning. train.reset_index(inplace=True, drop=True) valid.reset_index(inplace=True, drop=True) # ## Preprocessing and preparing the images for training # * The images are loaded as grayscale and reshaped to width 256 and height 64. # * The width and height are cropped if they are greater than 256 and 64 respectively. If they are smaller, then the image is padded with white pixels. Finally the image is rotated clockwise to bring the image shape to (x, y). # * The image is then normalized to range [0, 1] def preprocess(img): (h, w) = img.shape final_img = np.ones([64, 256]) * 255 # blank white image # crop if w > 256: img = img[:, :256] if h > 64: img = img[:64, :] final_img[:h, :w] = img return cv2.rotate(final_img, cv2.ROTATE_90_CLOCKWISE) # The model will be trained on 30000 images and validate on 3000 images train_size = 30000 valid_size = 3000 train_x = [] for i in range(train_size): img_dir = ( "/kaggle/input/handwriting-recognition/train_v2/train/" + train.loc[i, "FILENAME"] ) image = cv2.imread(img_dir, cv2.IMREAD_GRAYSCALE) image = preprocess(image) image = image / 255.0 train_x.append(image) valid_x = [] for i in range(valid_size): img_dir = ( "/kaggle/input/handwriting-recognition/validation_v2/validation/" + valid.loc[i, "FILENAME"] ) image = cv2.imread(img_dir, cv2.IMREAD_GRAYSCALE) image = preprocess(image) image = image / 255.0 valid_x.append(image) train_x = np.array(train_x).reshape(-1, 256, 64, 1) valid_x = np.array(valid_x).reshape(-1, 256, 64, 1) # ## Preparing the labels for CTC Loss # Learn more about CTC loss and why its amazing for text recognition from [here](https://theailearner.com/2019/05/29/connectionist-temporal-classificationctc/). # The labels have to be converted to numbers which represent each character in the training set. The 'alphabets' consist of A-Z and three special characters (- ' and space). alphabets = "ABCDEFGHIJKLMNOPQRSTUVWXYZ-' " max_str_len = 24 # max length of input labels num_of_characters = len(alphabets) + 1 # +1 for ctc pseudo blank num_of_timestamps = 64 # max length of predicted labels def label_to_num(label): label_num = [] for ch in label: label_num.append(alphabets.find(ch)) return np.array(label_num) def num_to_label(num): ret = "" for ch in num: if ch == -1: # CTC Blank break else: ret += alphabets[ch] return ret name = "JEBASTIN" print(name, "\n", label_to_num(name)) # * **train_y** contains the true labels converted to numbers and padded with -1. The length of each label is equal to max_str_len. # * **train_label_len** contains the length of each true label (without padding) # * **train_input_len** contains the length of each predicted label. 
The length of all the predicted labels is constant i.e number of timestamps - 2. # * **train_output** is a dummy output for ctc loss. # train_y = np.ones([train_size, max_str_len]) * -1 train_label_len = np.zeros([train_size, 1]) train_input_len = np.ones([train_size, 1]) * (num_of_timestamps - 2) train_output = np.zeros([train_size]) for i in range(train_size): train_label_len[i] = len(train.loc[i, "IDENTITY"]) train_y[i, 0 : len(train.loc[i, "IDENTITY"])] = label_to_num( train.loc[i, "IDENTITY"] ) valid_y = np.ones([valid_size, max_str_len]) * -1 valid_label_len = np.zeros([valid_size, 1]) valid_input_len = np.ones([valid_size, 1]) * (num_of_timestamps - 2) valid_output = np.zeros([valid_size]) for i in range(valid_size): valid_label_len[i] = len(valid.loc[i, "IDENTITY"]) valid_y[i, 0 : len(valid.loc[i, "IDENTITY"])] = label_to_num( valid.loc[i, "IDENTITY"] ) print( "True label : ", train.loc[100, "IDENTITY"], "\ntrain_y : ", train_y[100], "\ntrain_label_len : ", train_label_len[100], "\ntrain_input_len : ", train_input_len[100], ) # ## Building our model # input_data = Input(shape=(256, 64, 1), name="input") inner = Conv2D( 32, (3, 3), padding="same", name="conv1", kernel_initializer="he_normal" )(input_data) inner = BatchNormalization()(inner) inner = Activation("relu")(inner) inner = MaxPooling2D(pool_size=(2, 2), name="max1")(inner) inner = Conv2D( 64, (3, 3), padding="same", name="conv2", kernel_initializer="he_normal" )(inner) inner = BatchNormalization()(inner) inner = Activation("relu")(inner) inner = MaxPooling2D(pool_size=(2, 2), name="max2")(inner) inner = Dropout(0.3)(inner) inner = Conv2D( 128, (3, 3), padding="same", name="conv3", kernel_initializer="he_normal" )(inner) inner = BatchNormalization()(inner) inner = Activation("relu")(inner) inner = MaxPooling2D(pool_size=(1, 2), name="max3")(inner) inner = Dropout(0.3)(inner) # CNN to RNN inner = Reshape(target_shape=((64, 1024)), name="reshape")(inner) inner = Dense(64, activation="relu", kernel_initializer="he_normal", name="dense1")( inner ) ## RNN inner = Bidirectional(LSTM(256, return_sequences=True), name="lstm1")(inner) inner = Bidirectional(LSTM(256, return_sequences=True), name="lstm2")(inner) ## OUTPUT inner = Dense(num_of_characters, kernel_initializer="he_normal", name="dense2")(inner) y_pred = Activation("softmax", name="softmax")(inner) model = Model(inputs=input_data, outputs=y_pred) model.summary() # The output shape of the predictions is (64, 30). The model predicts words of 64 characters and each character contains the probability of the 30 alphabets which we defined earlier. 
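# Not part of the original notebook: a minimal sketch of what greedy (best-path) CTC
# decoding does with one (64, 30) softmax output -- the same operation that
# K.ctc_decode(..., greedy=True) performs further below, shown here only to make the
# output shape concrete. By TensorFlow's CTC convention the blank token is the last
# class, i.e. index num_of_characters - 1 = 29 here.
def greedy_ctc_decode(probs, blank=num_of_characters - 1):
    best = np.argmax(probs, axis=-1)  # best class per timestep, shape (64,)
    collapsed, prev = [], None
    for idx in best:
        if idx != prev and idx != blank:  # collapse repeats, then drop blanks
            collapsed.append(idx)
        prev = idx
    return num_to_label(collapsed)  # reuse the notebook's index-to-character mapping


# e.g. greedy_ctc_decode(model.predict(valid_x[:1])[0]) would return the decoded name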
# the ctc loss function def ctc_lambda_func(args): y_pred, labels, input_length, label_length = args # the 2 is critical here since the first couple outputs of the RNN # tend to be garbage y_pred = y_pred[:, 2:, :] return K.ctc_batch_cost(labels, y_pred, input_length, label_length) labels = Input(name="gtruth_labels", shape=[max_str_len], dtype="float32") input_length = Input(name="input_length", shape=[1], dtype="int64") label_length = Input(name="label_length", shape=[1], dtype="int64") ctc_loss = Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")( [y_pred, labels, input_length, label_length] ) model_final = Model( inputs=[input_data, labels, input_length, label_length], outputs=ctc_loss ) # ## Train our model # the loss calculation occurs elsewhere, so we use a dummy lambda function for the loss model_final.compile( loss={"ctc": lambda y_true, y_pred: y_pred}, optimizer=Adam(lr=0.0001) ) model_final.fit( x=[train_x, train_y, train_input_len, train_label_len], y=train_output, validation_data=( [valid_x, valid_y, valid_input_len, valid_label_len], valid_output, ), epochs=60, batch_size=128, ) # ## Check model performance on validation set preds = model.predict(valid_x) decoded = K.get_value( K.ctc_decode( preds, input_length=np.ones(preds.shape[0]) * preds.shape[1], greedy=True )[0][0] ) prediction = [] for i in range(valid_size): prediction.append(num_to_label(decoded[i])) y_true = valid.loc[0:valid_size, "IDENTITY"] correct_char = 0 total_char = 0 correct = 0 for i in range(valid_size): pr = prediction[i] tr = y_true[i] total_char += len(tr) for j in range(min(len(tr), len(pr))): if tr[j] == pr[j]: correct_char += 1 if pr == tr: correct += 1 print("Correct characters predicted : %.2f%%" % (correct_char * 100 / total_char)) print("Correct words predicted : %.2f%%" % (correct * 100 / valid_size)) # ## Some predictions on test set test = pd.read_csv( "/kaggle/input/handwriting-recognition/written_name_validation_v2.csv" ) plt.figure(figsize=(15, 10)) for i in range(6): ax = plt.subplot(2, 3, i + 1) img_dir = ( "/kaggle/input/handwriting-recognition/validation_v2/validation/" + test.loc[i, "FILENAME"] ) image = cv2.imread(img_dir, cv2.IMREAD_GRAYSCALE) plt.imshow(image, cmap="gray") image = preprocess(image) image = image / 255.0 pred = model.predict(image.reshape(1, 256, 64, 1)) decoded = K.get_value( K.ctc_decode( pred, input_length=np.ones(pred.shape[0]) * pred.shape[1], greedy=True )[0][0] ) plt.title(num_to_label(decoded[0]), fontsize=12) plt.axis("off") plt.subplots_adjust(wspace=0.2, hspace=-0.8)
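# Not part of the original notebook: the character accuracy above compares characters
# position by position, so a single inserted or dropped character shifts the rest of the
# word and is counted as many errors. A common complementary metric is the character
# error rate (CER) based on Levenshtein (edit) distance; this sketch reuses `prediction`,
# `y_true` and `valid_size` from the cells above.
def levenshtein(a, b):
    # single-row dynamic-programming edit distance
    dp = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        prev, dp[0] = dp[0], i
        for j, cb in enumerate(b, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (ca != cb))
    return dp[len(b)]


total_edits = sum(levenshtein(prediction[i], y_true[i]) for i in range(valid_size))
total_chars = sum(len(y_true[i]) for i in range(valid_size))
print("Character error rate (CER) : %.2f%%" % (total_edits * 100 / total_chars))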
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/702/69702157.ipynb
handwriting-recognition
landlord
[{"Id": 69702157, "ScriptId": 19042553, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5380789, "CreationDate": "08/02/2021 20:58:24", "VersionNumber": 1.0, "Title": "Handwriting_Recognition_CRNN", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 285.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 283.0, "LinesInsertedFromFork": 2.0, "LinesDeletedFromFork": 20.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 283.0, "TotalVotes": 0}]
[{"Id": 93174767, "KernelVersionId": 69702157, "SourceDatasetVersionId": 1400106}]
[{"Id": 1400106, "DatasetId": 818027, "DatasourceVersionId": 1432829, "CreatorUserId": 4287407, "LicenseName": "CC0: Public Domain", "CreationDate": "08/05/2020 17:20:36", "VersionNumber": 1.0, "Title": "Handwriting Recognition", "Slug": "handwriting-recognition", "Subtitle": "Transcriptions of 400,000 handwritten names", "Description": "### Overview\nThis dataset consists of more than four hundred thousand handwritten names collected through charity projects.\n\nCharacter Recognition utilizes image processing technologies to convert characters on scanned documents into digital forms. It typically performs well in machine-printed fonts. However, it still poses difficult challenges for machines to recognize handwritten characters, because of the huge variation in individual writing styles.\n\nThere are 206,799 first names and 207,024 surnames in total. The data was divided into a training set (331,059), testing set (41,382), and validation set (41,382) respectively.\n\n### Content\n\nThe input data here are hundreds of thousands of images of handwritten names. In the Data, you\u2019ll find the transcribed images broken up into test, training, and validation sets.\n\nImage Lable follow the following naming format enabling you to extend the data set with your own data.\n\n| Image | URL | | | | |\n| --- | --- | --- | --- | --- | --- |\n| D2M | 15 | 0010079F | 0002 | 1 | first\tname.jpg |\n| D2M | 15 | 0010079F | 0002 | 1 | surname.jpg |\t\n| D2M | 15 | 0010079F | 0003 | 2 | surname.jpg |\n| D2M | 15 | 0010079F | 0004 | 3 | first name.jpg |\n| D2M | 15 | 0010079F | 0004 | 3 | surname.jpg |\n| D2M | 15 | 0010079F | 0005 | 4 | first name.jpg |\n| D2M | 15 | 0010079F | 0006 | 5 | first name.jpg |\n| D2M | 15 | 0010079F | 0006 | 5 | surname.jpg | \n| D2M | 15 | 0010079F | 0007 | 6 | first name.jpg |\n\n### Inspiration\n\nThe Inspiration of this is to explore the task of classifying handwritten text and to convert handwritten text into the digital format using various approaches out there", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 818027, "CreatorUserId": 4287407, "OwnerUserId": 4287407.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1400106.0, "CurrentDatasourceVersionId": 1432829.0, "ForumId": 833132, "Type": 2, "CreationDate": "08/05/2020 17:20:36", "LastActivityDate": "08/05/2020", "TotalViews": 155565, "TotalDownloads": 19303, "TotalVotes": 347, "TotalKernels": 54}]
[{"Id": 4287407, "UserName": "landlord", "DisplayName": "landlord", "RegisterDate": "01/05/2020", "PerformanceTier": 2}]
false
2
3,589
0
4,203
3,589
69702652
<jupyter_start><jupyter_text>Hitters Baseball Data Description Major League Baseball Data from the 1986 and 1987 seasons. Usage Hitters Format A data frame with 322 observations of major league players on the following 20 variables. AtBat: Number of times at bat in 1986 Hits: Number of hits in 1986 HmRun: Number of home runs in 1986 Runs: Number of runs in 1986 RBI: Number of runs batted in in 1986 Walks: Number of walks in 1986 Years: Number of years in the major leagues CAtBat: Number of times at bat during his career CHits: Number of hits during his career CHmRun: Number of home runs during his career CRuns: Number of runs during his career CRBI: Number of runs batted in during his career CWalks: Number of walks during his career League: A factor with levels A and N indicating player's league at the end of 1986 Division: A factor with levels E and W indicating player's division at the end of 1986 PutOuts: Number of put outs in 1986 Assists: Number of assists in 1986 Errors: Number of errors in 1986 Salary: 1987 annual salary on opening day in thousands of dollars NewLeague: A factor with levels A and N indicating player's league at the beginning of 1987 Source This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University. This is part of the data that was used in the 1988 ASA Graphics Section Poster Session. The salary data were originally from Sports Illustrated, April 20, 1987. The 1986 and career statistics were obtained from The 1987 Baseball Encyclopedia Update published by Collier Books, Macmillan Publishing Company, New York. References Games, G., Witten, D., Hastie, T., and Tibshirani, R. (2013) An Introduction to Statistical Learning with applications in R, www.StatLearning.com, Springer-Verlag, New York Examples summary(Hitters) Dataset imported from https://www.r-project.org. 
Kaggle dataset identifier: hitters <jupyter_code>import pandas as pd df = pd.read_csv('hitters/hitters.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 322 entries, 0 to 321 Data columns (total 20 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 AtBat 322 non-null int64 1 Hits 322 non-null int64 2 HmRun 322 non-null int64 3 Runs 322 non-null int64 4 RBI 322 non-null int64 5 Walks 322 non-null int64 6 Years 322 non-null int64 7 CAtBat 322 non-null int64 8 CHits 322 non-null int64 9 CHmRun 322 non-null int64 10 CRuns 322 non-null int64 11 CRBI 322 non-null int64 12 CWalks 322 non-null int64 13 League 322 non-null object 14 Division 322 non-null object 15 PutOuts 322 non-null int64 16 Assists 322 non-null int64 17 Errors 322 non-null int64 18 Salary 263 non-null float64 19 NewLeague 322 non-null object dtypes: float64(1), int64(16), object(3) memory usage: 50.4+ KB <jupyter_text>Examples: { "AtBat": 293, "Hits": 66, "HmRun": 1, "Runs": 30, "RBI": 29, "Walks": 14, "Years": 1, "CAtBat": 293, "CHits": 66, "CHmRun": 1, "CRuns": 30, "CRBI": 29, "CWalks": 14, "League": "A", "Division": "E", "PutOuts": 446, "Assists": 33, "Errors": 20, "Salary": NaN, "NewLeague": "A" } { "AtBat": 315, "Hits": 81, "HmRun": 7, "Runs": 24, "RBI": 38, "Walks": 39, "Years": 14, "CAtBat": 3449, "CHits": 835, "CHmRun": 69, "CRuns": 321, "CRBI": 414, "CWalks": 375, "League": "N", "Division": "W", "PutOuts": 632, "Assists": 43, "Errors": 10, "Salary": 475.0, "NewLeague": "N" } { "AtBat": 479, "Hits": 130, "HmRun": 18, "Runs": 66, "RBI": 72, "Walks": 76, "Years": 3, "CAtBat": 1624, "CHits": 457, "CHmRun": 63, "CRuns": 224, "CRBI": 266, "CWalks": 263, "League": "A", "Division": "W", "PutOuts": 880, "Assists": 82, "Errors": 14, "Salary": 480.0, "NewLeague": "A" } { "AtBat": 496, "Hits": 141, "HmRun": 20, "Runs": 65, "RBI": 78, "Walks": 37, "Years": 11, "CAtBat": 5628, "CHits": 1575, "CHmRun": 225, "CRuns": 828, "CRBI": 838, "CWalks": 354, "League": "N", "Division": "E", "PutOuts": 200, "Assists": 11, "Errors": 3, "Salary": 500.0, "NewLeague": "N" } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory from warnings import filterwarnings filterwarnings("ignore") import seaborn as sns import matplotlib.pyplot as plt import statsmodels.api as sm import statsmodels.formula.api as smf from sklearn import model_selection from sklearn.neighbors import LocalOutlierFactor, KNeighborsRegressor from sklearn.metrics import mean_squared_error, r2_score from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict from sklearn.ensemble import RandomForestRegressor from sklearn.decomposition import PCA from sklearn.preprocessing import scale from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.model_selection import GridSearchCV from sklearn.svm import SVR from sklearn.neural_network import MLPRegressor import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Index # 1) [Verinin Yüklenmesi ve Veriye Ön Bakış](#1) # 2) [Değişkenler Arasındaki İlişki](#2) # 3) [Boş Değerlerin Bulunması ve Doldurulması, Aykırı Değerlerin Baskılanması](#3) # 4) [Analiz](#4) # &emsp; A) [Doğrusal Modeller](#4.0) # &emsp; 4.1) [Basit Doğrusal Regresyon Modeli](#4.1) # &emsp; 4.2) [Çoklu Doğrıusal Regresyon Modeli](#4.2) # &emsp; 4.3) [PCR Modeli](#4.3) # &emsp; 4.4) [PLS Modeli](#4.4) # &emsp; 4.5) [Ridge Regresyon Modeli](#4.5) # &emsp; 4.6) [Lasso Modeli](#4.6) # &emsp; 4.7) [ElasticNet Modeli](#4.7) # &emsp; B) [Doğrusal Olmayan Modeller](#5.0) # &emsp; 5.1) [KNN Modeli](#5.1) # &emsp; 5.2) [SVR (Destek Vektör Regresyonu) Modeli](#5.2) # &emsp; 5.3) [Doğrusal Olmayan SVR Modeli](#5.3) # &emsp; 5.4) [Çok Katmanlı Algılayıcı Modeli](#5.4) # &emsp; 5.5) [CART Modeli](#5.5) # ## 1. Verinin Yüklenmesi maindata = pd.read_csv("../input/hitters/hitters.csv") maindatac = maindata.copy() # ## Veriye Ön Bakış maindata.info() maindata.head() # # ## 2. Değişkenler Arasındaki İlişki maindata.describe().T maindata.corr() plt.subplots(figsize=(20, 20)) sns.heatmap(maindata.corr(), annot=True, fmt=".1f") # # ## 3.Boş Değerlerin Bulunması ve Doldurulması maindata.isnull().sum() # There are 59 NaN Value in Salary column maindata.groupby("League")["Salary"].mean() maindatac["Salary"].fillna( maindatac.groupby("League")["Salary"].transform("mean"), inplace=True ) maindatac.isnull().sum() # ## Aykırı Değerlerin Baskılanması maindata.boxplot(column="Salary", by="League") outlier = maindatac["Salary"] Q1 = outlier.quantile(0.25) Q3 = outlier.quantile(0.75) IQR = Q3 - Q1 sns.boxplot(x=outlier) low_limit = Q1 - 1.5 * IQR high_limit = Q3 + 1.5 * IQR (outlier < low_limit) | (outlier > high_limit) outlier_tf = (outlier < low_limit) | (outlier > high_limit) outlier_tf # Üst aykırıları üst limite eşitlemek outlier[outlier_tf] = high_limit outlier[outlier_tf] sns.boxplot(x=outlier) # # ## 4. 
Analiz # sns.pairplot(maindatac, kind = "reg") dms = pd.get_dummies(maindatac[["League", "Division", "NewLeague"]]) X_ = maindatac.drop(["Salary", "League", "Division", "NewLeague"], axis=1).astype( "float64" ) y = maindatac["Salary"] X = pd.concat([X_, dms[["League_N", "Division_W", "NewLeague_N"]]], axis=1) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42 ) print("X_train", X_train.shape) print("X_test", X_test.shape) print("y_train", y_train.shape) print("y_test", y_test.shape) # # ### **A. Doğrusal Modeller** # ### 1) Basit Doğrusal Regresyon # X = Bağımsız Katsayı X = maindatac[["CHits"]] X = sm.add_constant(X) # 1 ekliyoruz X[0:5] # y = Bağımlı Katsayı y = maindatac["Salary"] y[0:5] # Modelin Oluşturulması lm = sm.OLS(y, X) model = lm.fit() model.summary() # Model Parametreleri model.params # Modelin Güven Aralığı model.conf_int() print("f_pvalue: ", "%.4f" % model.f_pvalue) print("f_value: ", "%.2f" % model.fvalue) print("t_value: ", "%.2f" % model.tvalues[0:1]) print("adj. r2: ", "%.2f" % model.rsquared_adj) g_t = pd.DataFrame({"gercek_y": y[0:5], "tahmini_y": model.fittedvalues[0:5]}) g_t print( "Salary = " + str("%.2f" % model.params[0]) + " + Hits*" + str("%.2f" % model.params[1]) ) # 2 değişken arasındaki ilişki fig = sns.regplot( maindatac["Hits"], maindatac["Salary"], scatter_kws={"color": "r", "s": 9} ) fig.set_title("Model Denklemi: Salary = 320.17 + Hits*0.27") fig.set_ylabel("Salary") fig.set_xlabel("Hits") plt.ylim(bottom=0) plt.show() # #### Tahmin # **Model :** *Salary = 320.17 + Hits*0.27* # **Soru :** *Eğer bir atıcı sezon boyunca 2000 atış yapabildiyse maaşı ne olur? * X = maindatac[["Hits"]] y = maindatac["Salary"] reg = LinearRegression() model = reg.fit(X, y) model.predict([[2000]]) # # #### Hata Kareleri lm = sm.OLS(y, X) model = lm.fit() mse = mean_squared_error(y, model.fittedvalues) rmse = np.sqrt(mse) print("Hata Karelerinin Ortalaması: ", mse) print("Hata Karelerinin Ortalamasının Karekökü: ", rmse) lm = smf.ols("Salary ~ Hits", maindatac) model = lm.fit() mse = mean_squared_error(y, model.fittedvalues) rmse = np.sqrt(mse) print("Hata Karelerinin Ortalaması: ", mse) print("Hata Karelerinin Ortalamasının Karekökü: ", rmse) X = maindatac[["Hits"]] model = reg.fit(X, y) k_t = pd.DataFrame({"gercek_y": y[0:10], "tahmini_y": reg.predict(X)[0:10]}) k_t k_t["hata"] = k_t["gercek_y"] - k_t["tahmini_y"] k_t k_t["hata_kare"] = k_t["hata"] ** 2 k_t print("Hata Karelerinin Toplamı: ", np.sum(k_t["hata_kare"])) print("Hata Karelerinin Ortalaması: ", np.mean(k_t["hata_kare"])) print("Hata Karelerinin Ortalamasının Karekökü: ", np.sqrt(np.mean(k_t["hata_kare"]))) # # ### 4.2) Çoklu Doğrusal Regresyon # #### Modelin Oluşturulması reg = LinearRegression() model = reg.fit(X_train, y_train) print("Sabit Katsayı: ", model.intercept_) print("Değişkenlerin Katsayıları: ", model.coef_) # #### Tahmin # yeni_katsayilar = [ [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], ] yeni_katsayilar = pd.DataFrame(yeni_katsayilar).T model.predict(yeni_katsayilar) # #### Model Doğrulama y_pred = model.predict(X_train) y_pred_t = model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) model.score(X_train, y_train) r2_score(y_train, y_pred) # #### Grafik tuned = RandomForestRegressor(max_depth=8, max_features=3, n_estimators=200) tuned.fit(X_train, y_train) 
Importance = pd.DataFrame( {"Importance": tuned.feature_importances_ * 100}, index=X_train.columns ) Importance.sort_values(by="Importance", axis=0, ascending=True).plot( kind="barh", color="r" ) plt.xlabel("Değişken Önem Düzeyleri") # # ### 4.3) PCR Model # #### Modelin Oluşturulması pca = PCA() lm = LinearRegression() X_reduced_train = pca.fit_transform(scale(X_train)) pcr_model = lm.fit(X_reduced_train, y_train) print("Sabit Katsayı: ", pcr_model.intercept_) print("Katsayılar", pcr_model.coef_) # #### Tahmin yeni_katsayilar = [ [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], ] yeni_katsayilar = pd.DataFrame(yeni_katsayilar).T model.predict(yeni_katsayilar) # #### Model Doğrulama X_reduced_test = pca.fit_transform(scale(X_test)) y_pred = pcr_model.predict(X_reduced_train) y_pred_t = pcr_model.predict(X_reduced_test) print("Eğitim Seti Hata Katsayısı", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı", np.sqrt(mean_squared_error(y_test, y_pred_t))) model.score(X_train, y_train) r2_score(y_train, y_pred) # #### Grafik cv_10 = model_selection.KFold(n_splits=10, shuffle=True, random_state=1) RMSE = [] for i in np.arange(1, X_reduced_train.shape[1] + 1): score = np.sqrt( -1 * model_selection.cross_val_score( lm, X_reduced_train[:, :i], y_train.ravel(), cv=cv_10, scoring="neg_mean_squared_error", ).mean() ) RMSE.append(score) plt.plot(RMSE, "-v") plt.xlabel("Bileşen Sayısı") plt.ylabel("RMSE") plt.title("Maaş Tahmin Modeli İçin PCR Model Doğrulama") # # ### 4.4) PLS Model # #### Modelin Oluşturulması pls_model = PLSRegression().fit(X_train, y_train) print("Değişken Katsayılar: ", pls_model.coef_) # #### Tahmin # #### Model Doğrulama y_pred = pls_model.predict(X_train) y_pred_t = pls_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) # R2 Değeri r2_score(y_train, y_pred) model.score(X_train, y_train) # #### Grafik cv_10 = model_selection.KFold(n_splits=10, shuffle=True, random_state=1) RMSE = [] for i in np.arange(1, X_train.shape[1] + 1): pls = PLSRegression(n_components=i) score = np.sqrt( -1 * cross_val_score( pls, X_train, y_train, cv=cv_10, scoring="neg_mean_squared_error" ).mean() ) RMSE.append(score) plt.plot(RMSE, "-v") plt.xlabel("Bileşen Sayısı") plt.ylabel("RMSE") plt.title("Maaş Tahmin Modeli İçin PLS Model Doğrulama") # # ### 4.5) Ridge Regresyon # #### Modelin Oluşturulması ridge_model = Ridge(alpha=0.1).fit(X_train, y_train) print("Sabit Katsayı :", ridge_model.intercept_) print("Değişken Katsayıları :", ridge_model.coef_) # #### Tahmin # #### Model Doğrulama y_pred = ridge_model.predict(X_train) y_pred_t = ridge_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) ridge_model.score(X_train, y_train) r2_score(y_train, y_pred) # #### Grafik ridge = Ridge() lambdalar = 10 ** np.linspace(10, -2, 100) * 0.5 katsayilar = [] for i in lambdalar: ridge.set_params(alpha=i) ridge.fit(X_train, y_train) katsayilar.append(ridge.coef_) ax = plt.gca() ax.plot(lambdalar * 2, katsayilar) ax.set_xscale("log") plt.axis("tight") plt.xlabel("alpha") plt.ylabel("weights") # # ### 4.6) Lasso Model # #### Modelin Oluşturulması lasso_model = Lasso().fit(X_train, y_train) print("Sabit Katsayı: ", lasso_model.intercept_) print("Değişken 
Katsayılar: ", lasso_model.coef_) # #### Tahmin # #### Model Doğrulama y_pred = lasso_model.predict(X_train) y_pred_t = lasso_model.predict(X_test) print("Eğtim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) # #### Grafik lasso = Lasso() lambdalar = 10 ** np.linspace(10, -2, 100) * 0.5 katsayilar = [] for i in lambdalar: lasso.set_params(alpha=i) lasso.fit(X_train, y_train) katsayilar.append(lasso.coef_) ax = plt.gca() ax.plot(lambdalar * 2, katsayilar) ax.set_xscale("log") plt.axis("tight") plt.xlabel("alpha") plt.ylabel("weights") # # ### 4.7) ElasticNet # #### Modelin Oluşturulması elas_model = ElasticNet().fit(X_train, y_train) print("Sabit Katsayılar: ", elas_model.intercept_) print("Değişken Katsayılar: ", elas_model.coef_) # #### Tahmin # #### Model Doğrulama y_pred = elas_model.predict(X_train) y_pred_t = elas_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) elas_model.score(X_train, y_train) r2_score(y_train, y_pred) # #### Grafik elastic = ElasticNet() lambdalar = 10 ** np.linspace(10, -2, 100) * 0.5 katsayilar = [] for i in lambdalar: elastic.set_params(alpha=i) elastic.fit(X_train, y_train) katsayilar.append(elastic.coef_) ax = plt.gca() ax.plot(lambdalar * 2, katsayilar) ax.set_xscale("log") plt.axis("tight") plt.xlabel("alpha") plt.ylabel("weights") # > YORUM # ### **B) Doğrusal Olmayan Modeller** # ### 5.1) KNN Modeli # #### Modelin Oluşturulması knn_model = KNeighborsRegressor().fit(X_train, y_train) print("En Yakın Komşu Sayısı: ", knn_model.n_neighbors) # #### Tahmin y_pred = knn_model.predict(X_train) y_pred_t = knn_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) # #### Model Doğrulama knn = KNeighborsRegressor() knn_params = {"n_neighbors": np.arange(1, 50, 1)} knn_cv_model = GridSearchCV(knn, knn_params, cv=10) knn_cv_model.fit(X_train, y_train) knn_tuned = KNeighborsRegressor(n_neighbors=knn_cv_model.best_params_["n_neighbors"]) knn_tuned.fit(X_train, y_train) np.sqrt(mean_squared_error(y_test, knn_tuned.predict(X_test))) # #### Grafik # ### 5.2) SVR (Destek Vektör Regresyonu) # #### Modelin Oluşturulması svr_model = SVR("linear").fit(X_train, y_train) print("Sabit Katsayı: ", svr_model.intercept_) print("Değişken Katsayılar: ", svr_model.coef_) # #### Tahmin y_pred = svr_model.predict(X_train) y_pred_t = svr_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) # #### Model Doğrulama svr_params = {"C": np.arange(1, 20, 1)} svr_cv_model = GridSearchCV(svr_model, svr_params, cv=10) svr_cv_model.fit(X_train, y_train) svr_tuned = SVR("linear", C=pd.Series(svr_cv_model.best_params_)) svr_tuned.fit(X_train, y_train) np.sqrt(mean_squared_error(y_test, svr_tuned.predict(X_test))) # #### Grafik # ### 5.3) Doğrusal Olmayan SVR Modeli # #### Modelin Oluşturulması svr_model_d = SVR("rbf").fit(X_train, y_train) print("Sabit Katsayı: ", svr_model_d.intercept_) y_pred = svr_model_d.predict(X_train) y_pred_t = svr_model_d.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", 
np.sqrt(mean_squared_error(y_test, y_pred_t))) svr_params = {"C": np.arange(1, 20, 1)} svr_cv_model_d = GridSearchCV(svr_model_d, svr_params, cv=10) svr_cv_model_d.fit(X_train, y_train) svr_tuned = SVR("rbf", C=pd.Series(svr_cv_model_d.best_params_)) svr_tuned.fit(X_train, y_train) np.sqrt(mean_squared_error(y_test, svr_tuned.predict(X_test))) # #### Grafik # ### 5.4) Çok Katmanlı Algılayıcı Modeli mlp_model = MLPRegressor(hidden_layer_sizes=(100, 20)).fit(X_train, y_train) y_pred = mlp_model.predict(X_train) y_pred_t = mlp_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) mlp_params = { "alpha": [10, 20, 30, 40, 50], "hidden_layer_sizes": (20, 20), "activation": ["relu", "logistic"], } mlp_cv_model = GridSearchCV(mlp_model, mlp_params, cv=10) mlp_cv_model.fit(X_train, y_train) mlp_tuned = MLPRegressor( alpha=mlp_cv_model.best_params_["alpha"], hidden_layer_sizes=mlp_cv_model.best_params_["hidden_layer_sizes"], activation=mlp_cv_model.best_params_["activation"], ).fit(X_train, y_train) np.sqrt(mean_squared_error(y_test, mlp_tuned.predict(X_test)))
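# ### 5.5) CART Modeli
# Not part of the original notebook: the index above lists a CART model (5.5) that is never
# implemented. Below is a minimal sketch following the same pattern as the other models,
# assuming the original X_train / X_test / y_train / y_test split created in section 4.
from sklearn.tree import DecisionTreeRegressor

cart_model = DecisionTreeRegressor(random_state=42).fit(X_train, y_train)
y_pred = cart_model.predict(X_train)
y_pred_t = cart_model.predict(X_test)
print("Train RMSE: ", np.sqrt(mean_squared_error(y_train, y_pred)))
print("Test RMSE: ", np.sqrt(mean_squared_error(y_test, y_pred_t)))

# Tune tree depth and split size with GridSearchCV, mirroring the tuning used above.
cart_params = {"max_depth": [2, 3, 4, 5, 8, 10], "min_samples_split": [2, 10, 30, 50]}
cart_cv_model = GridSearchCV(DecisionTreeRegressor(random_state=42), cart_params, cv=10)
cart_cv_model.fit(X_train, y_train)
cart_tuned = DecisionTreeRegressor(random_state=42, **cart_cv_model.best_params_).fit(
    X_train, y_train
)
print("Tuned Test RMSE: ", np.sqrt(mean_squared_error(y_test, cart_tuned.predict(X_test))))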
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/702/69702652.ipynb
hitters
gurkansaman
[{"Id": 69702652, "ScriptId": 18858277, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7401724, "CreationDate": "08/02/2021 21:01:38", "VersionNumber": 21.0, "Title": "At\u0131c\u0131lar", "EvaluationDate": "08/02/2021", "IsChange": false, "TotalLines": 641.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 641.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 93175370, "KernelVersionId": 69702652, "SourceDatasetVersionId": 2424717}]
[{"Id": 2424717, "DatasetId": 1467251, "DatasourceVersionId": 2466924, "CreatorUserId": 1933645, "LicenseName": "Unknown", "CreationDate": "07/14/2021 12:23:07", "VersionNumber": 1.0, "Title": "Hitters", "Slug": "hitters", "Subtitle": "Major League Baseball Data from the 1986 and 1987 seasons.", "Description": "Baseball Data\nDescription\nMajor League Baseball Data from the 1986 and 1987 seasons.\n\nUsage\nHitters\n\nFormat\nA data frame with 322 observations of major league players on the following 20 variables.\n\nAtBat: Number of times at bat in 1986\n\nHits: Number of hits in 1986\n\nHmRun: Number of home runs in 1986\n\nRuns: Number of runs in 1986\n\nRBI: Number of runs batted in in 1986\n\nWalks: Number of walks in 1986\n\nYears: Number of years in the major leagues\n\nCAtBat: Number of times at bat during his career\n\nCHits: Number of hits during his career\n\nCHmRun: Number of home runs during his career\n\nCRuns: Number of runs during his career\n\nCRBI: Number of runs batted in during his career\n\nCWalks: Number of walks during his career\n\nLeague: A factor with levels A and N indicating player's league at the end of 1986\n\nDivision: A factor with levels E and W indicating player's division at the end of 1986\n\nPutOuts: Number of put outs in 1986\n\nAssists: Number of assists in 1986\n\nErrors: Number of errors in 1986\n\nSalary: 1987 annual salary on opening day in thousands of dollars\n\nNewLeague: A factor with levels A and N indicating player's league at the beginning of 1987\n\nSource\nThis dataset was taken from the StatLib library which is maintained at Carnegie Mellon University. This is part of the data that was used in the 1988 ASA Graphics Section Poster Session. The salary data were originally from Sports Illustrated, April 20, 1987. The 1986 and career statistics were obtained from The 1987 Baseball Encyclopedia Update published by Collier Books, Macmillan Publishing Company, New York.\n\nReferences\nGames, G., Witten, D., Hastie, T., and Tibshirani, R. (2013) An Introduction to Statistical Learning with applications in R, www.StatLearning.com, Springer-Verlag, New York\n\nExamples\nsummary(Hitters)\n\nDataset imported from https://www.r-project.org.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1467251, "CreatorUserId": 1933645, "OwnerUserId": 1933645.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2424717.0, "CurrentDatasourceVersionId": 2466924.0, "ForumId": 1486877, "Type": 2, "CreationDate": "07/14/2021 12:23:07", "LastActivityDate": "07/14/2021", "TotalViews": 7001, "TotalDownloads": 130, "TotalVotes": 6, "TotalKernels": 4}]
[{"Id": 1933645, "UserName": "gurkansaman", "DisplayName": "gurkansaman", "RegisterDate": "05/22/2018", "PerformanceTier": 1}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory from warnings import filterwarnings filterwarnings("ignore") import seaborn as sns import matplotlib.pyplot as plt import statsmodels.api as sm import statsmodels.formula.api as smf from sklearn import model_selection from sklearn.neighbors import LocalOutlierFactor, KNeighborsRegressor from sklearn.metrics import mean_squared_error, r2_score from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict from sklearn.ensemble import RandomForestRegressor from sklearn.decomposition import PCA from sklearn.preprocessing import scale from sklearn.cross_decomposition import PLSRegression, PLSSVD from sklearn.model_selection import GridSearchCV from sklearn.svm import SVR from sklearn.neural_network import MLPRegressor import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # Index # 1) [Verinin Yüklenmesi ve Veriye Ön Bakış](#1) # 2) [Değişkenler Arasındaki İlişki](#2) # 3) [Boş Değerlerin Bulunması ve Doldurulması, Aykırı Değerlerin Baskılanması](#3) # 4) [Analiz](#4) # &emsp; A) [Doğrusal Modeller](#4.0) # &emsp; 4.1) [Basit Doğrusal Regresyon Modeli](#4.1) # &emsp; 4.2) [Çoklu Doğrıusal Regresyon Modeli](#4.2) # &emsp; 4.3) [PCR Modeli](#4.3) # &emsp; 4.4) [PLS Modeli](#4.4) # &emsp; 4.5) [Ridge Regresyon Modeli](#4.5) # &emsp; 4.6) [Lasso Modeli](#4.6) # &emsp; 4.7) [ElasticNet Modeli](#4.7) # &emsp; B) [Doğrusal Olmayan Modeller](#5.0) # &emsp; 5.1) [KNN Modeli](#5.1) # &emsp; 5.2) [SVR (Destek Vektör Regresyonu) Modeli](#5.2) # &emsp; 5.3) [Doğrusal Olmayan SVR Modeli](#5.3) # &emsp; 5.4) [Çok Katmanlı Algılayıcı Modeli](#5.4) # &emsp; 5.5) [CART Modeli](#5.5) # ## 1. Verinin Yüklenmesi maindata = pd.read_csv("../input/hitters/hitters.csv") maindatac = maindata.copy() # ## Veriye Ön Bakış maindata.info() maindata.head() # # ## 2. Değişkenler Arasındaki İlişki maindata.describe().T maindata.corr() plt.subplots(figsize=(20, 20)) sns.heatmap(maindata.corr(), annot=True, fmt=".1f") # # ## 3.Boş Değerlerin Bulunması ve Doldurulması maindata.isnull().sum() # There are 59 NaN Value in Salary column maindata.groupby("League")["Salary"].mean() maindatac["Salary"].fillna( maindatac.groupby("League")["Salary"].transform("mean"), inplace=True ) maindatac.isnull().sum() # ## Aykırı Değerlerin Baskılanması maindata.boxplot(column="Salary", by="League") outlier = maindatac["Salary"] Q1 = outlier.quantile(0.25) Q3 = outlier.quantile(0.75) IQR = Q3 - Q1 sns.boxplot(x=outlier) low_limit = Q1 - 1.5 * IQR high_limit = Q3 + 1.5 * IQR (outlier < low_limit) | (outlier > high_limit) outlier_tf = (outlier < low_limit) | (outlier > high_limit) outlier_tf # Üst aykırıları üst limite eşitlemek outlier[outlier_tf] = high_limit outlier[outlier_tf] sns.boxplot(x=outlier) # # ## 4. 
Analiz # sns.pairplot(maindatac, kind = "reg") dms = pd.get_dummies(maindatac[["League", "Division", "NewLeague"]]) X_ = maindatac.drop(["Salary", "League", "Division", "NewLeague"], axis=1).astype( "float64" ) y = maindatac["Salary"] X = pd.concat([X_, dms[["League_N", "Division_W", "NewLeague_N"]]], axis=1) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42 ) print("X_train", X_train.shape) print("X_test", X_test.shape) print("y_train", y_train.shape) print("y_test", y_test.shape) # # ### **A. Doğrusal Modeller** # ### 1) Basit Doğrusal Regresyon # X = Bağımsız Katsayı X = maindatac[["CHits"]] X = sm.add_constant(X) # 1 ekliyoruz X[0:5] # y = Bağımlı Katsayı y = maindatac["Salary"] y[0:5] # Modelin Oluşturulması lm = sm.OLS(y, X) model = lm.fit() model.summary() # Model Parametreleri model.params # Modelin Güven Aralığı model.conf_int() print("f_pvalue: ", "%.4f" % model.f_pvalue) print("f_value: ", "%.2f" % model.fvalue) print("t_value: ", "%.2f" % model.tvalues[0:1]) print("adj. r2: ", "%.2f" % model.rsquared_adj) g_t = pd.DataFrame({"gercek_y": y[0:5], "tahmini_y": model.fittedvalues[0:5]}) g_t print( "Salary = " + str("%.2f" % model.params[0]) + " + Hits*" + str("%.2f" % model.params[1]) ) # 2 değişken arasındaki ilişki fig = sns.regplot( maindatac["Hits"], maindatac["Salary"], scatter_kws={"color": "r", "s": 9} ) fig.set_title("Model Denklemi: Salary = 320.17 + Hits*0.27") fig.set_ylabel("Salary") fig.set_xlabel("Hits") plt.ylim(bottom=0) plt.show() # #### Tahmin # **Model :** *Salary = 320.17 + Hits*0.27* # **Soru :** *Eğer bir atıcı sezon boyunca 2000 atış yapabildiyse maaşı ne olur? * X = maindatac[["Hits"]] y = maindatac["Salary"] reg = LinearRegression() model = reg.fit(X, y) model.predict([[2000]]) # # #### Hata Kareleri lm = sm.OLS(y, X) model = lm.fit() mse = mean_squared_error(y, model.fittedvalues) rmse = np.sqrt(mse) print("Hata Karelerinin Ortalaması: ", mse) print("Hata Karelerinin Ortalamasının Karekökü: ", rmse) lm = smf.ols("Salary ~ Hits", maindatac) model = lm.fit() mse = mean_squared_error(y, model.fittedvalues) rmse = np.sqrt(mse) print("Hata Karelerinin Ortalaması: ", mse) print("Hata Karelerinin Ortalamasının Karekökü: ", rmse) X = maindatac[["Hits"]] model = reg.fit(X, y) k_t = pd.DataFrame({"gercek_y": y[0:10], "tahmini_y": reg.predict(X)[0:10]}) k_t k_t["hata"] = k_t["gercek_y"] - k_t["tahmini_y"] k_t k_t["hata_kare"] = k_t["hata"] ** 2 k_t print("Hata Karelerinin Toplamı: ", np.sum(k_t["hata_kare"])) print("Hata Karelerinin Ortalaması: ", np.mean(k_t["hata_kare"])) print("Hata Karelerinin Ortalamasının Karekökü: ", np.sqrt(np.mean(k_t["hata_kare"]))) # # ### 4.2) Çoklu Doğrusal Regresyon # #### Modelin Oluşturulması reg = LinearRegression() model = reg.fit(X_train, y_train) print("Sabit Katsayı: ", model.intercept_) print("Değişkenlerin Katsayıları: ", model.coef_) # #### Tahmin # yeni_katsayilar = [ [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], ] yeni_katsayilar = pd.DataFrame(yeni_katsayilar).T model.predict(yeni_katsayilar) # #### Model Doğrulama y_pred = model.predict(X_train) y_pred_t = model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) model.score(X_train, y_train) r2_score(y_train, y_pred) # #### Grafik tuned = RandomForestRegressor(max_depth=8, max_features=3, n_estimators=200) tuned.fit(X_train, y_train) 
Importance = pd.DataFrame( {"Importance": tuned.feature_importances_ * 100}, index=X_train.columns ) Importance.sort_values(by="Importance", axis=0, ascending=True).plot( kind="barh", color="r" ) plt.xlabel("Değişken Önem Düzeyleri") # # ### 4.3) PCR Model # #### Modelin Oluşturulması pca = PCA() lm = LinearRegression() X_reduced_train = pca.fit_transform(scale(X_train)) pcr_model = lm.fit(X_reduced_train, y_train) print("Sabit Katsayı: ", pcr_model.intercept_) print("Katsayılar", pcr_model.coef_) # #### Tahmin yeni_katsayilar = [ [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], [10], ] yeni_katsayilar = pd.DataFrame(yeni_katsayilar).T model.predict(yeni_katsayilar) # #### Model Doğrulama X_reduced_test = pca.fit_transform(scale(X_test)) y_pred = pcr_model.predict(X_reduced_train) y_pred_t = pcr_model.predict(X_reduced_test) print("Eğitim Seti Hata Katsayısı", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı", np.sqrt(mean_squared_error(y_test, y_pred_t))) model.score(X_train, y_train) r2_score(y_train, y_pred) # #### Grafik cv_10 = model_selection.KFold(n_splits=10, shuffle=True, random_state=1) RMSE = [] for i in np.arange(1, X_reduced_train.shape[1] + 1): score = np.sqrt( -1 * model_selection.cross_val_score( lm, X_reduced_train[:, :i], y_train.ravel(), cv=cv_10, scoring="neg_mean_squared_error", ).mean() ) RMSE.append(score) plt.plot(RMSE, "-v") plt.xlabel("Bileşen Sayısı") plt.ylabel("RMSE") plt.title("Maaş Tahmin Modeli İçin PCR Model Doğrulama") # # ### 4.4) PLS Model # #### Modelin Oluşturulması pls_model = PLSRegression().fit(X_train, y_train) print("Değişken Katsayılar: ", pls_model.coef_) # #### Tahmin # #### Model Doğrulama y_pred = pls_model.predict(X_train) y_pred_t = pls_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) # R2 Değeri r2_score(y_train, y_pred) model.score(X_train, y_train) # #### Grafik cv_10 = model_selection.KFold(n_splits=10, shuffle=True, random_state=1) RMSE = [] for i in np.arange(1, X_train.shape[1] + 1): pls = PLSRegression(n_components=i) score = np.sqrt( -1 * cross_val_score( pls, X_train, y_train, cv=cv_10, scoring="neg_mean_squared_error" ).mean() ) RMSE.append(score) plt.plot(RMSE, "-v") plt.xlabel("Bileşen Sayısı") plt.ylabel("RMSE") plt.title("Maaş Tahmin Modeli İçin PLS Model Doğrulama") # # ### 4.5) Ridge Regresyon # #### Modelin Oluşturulması ridge_model = Ridge(alpha=0.1).fit(X_train, y_train) print("Sabit Katsayı :", ridge_model.intercept_) print("Değişken Katsayıları :", ridge_model.coef_) # #### Tahmin # #### Model Doğrulama y_pred = ridge_model.predict(X_train) y_pred_t = ridge_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) ridge_model.score(X_train, y_train) r2_score(y_train, y_pred) # #### Grafik ridge = Ridge() lambdalar = 10 ** np.linspace(10, -2, 100) * 0.5 katsayilar = [] for i in lambdalar: ridge.set_params(alpha=i) ridge.fit(X_train, y_train) katsayilar.append(ridge.coef_) ax = plt.gca() ax.plot(lambdalar * 2, katsayilar) ax.set_xscale("log") plt.axis("tight") plt.xlabel("alpha") plt.ylabel("weights") # # ### 4.6) Lasso Model # #### Modelin Oluşturulması lasso_model = Lasso().fit(X_train, y_train) print("Sabit Katsayı: ", lasso_model.intercept_) print("Değişken 
Katsayılar: ", lasso_model.coef_) # #### Tahmin # #### Model Doğrulama y_pred = lasso_model.predict(X_train) y_pred_t = lasso_model.predict(X_test) print("Eğtim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) # #### Grafik lasso = Lasso() lambdalar = 10 ** np.linspace(10, -2, 100) * 0.5 katsayilar = [] for i in lambdalar: lasso.set_params(alpha=i) lasso.fit(X_train, y_train) katsayilar.append(lasso.coef_) ax = plt.gca() ax.plot(lambdalar * 2, katsayilar) ax.set_xscale("log") plt.axis("tight") plt.xlabel("alpha") plt.ylabel("weights") # # ### 4.7) ElasticNet # #### Modelin Oluşturulması elas_model = ElasticNet().fit(X_train, y_train) print("Sabit Katsayılar: ", elas_model.intercept_) print("Değişken Katsayılar: ", elas_model.coef_) # #### Tahmin # #### Model Doğrulama y_pred = elas_model.predict(X_train) y_pred_t = elas_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) elas_model.score(X_train, y_train) r2_score(y_train, y_pred) # #### Grafik elastic = ElasticNet() lambdalar = 10 ** np.linspace(10, -2, 100) * 0.5 katsayilar = [] for i in lambdalar: elastic.set_params(alpha=i) elastic.fit(X_train, y_train) katsayilar.append(elastic.coef_) ax = plt.gca() ax.plot(lambdalar * 2, katsayilar) ax.set_xscale("log") plt.axis("tight") plt.xlabel("alpha") plt.ylabel("weights") # > YORUM # ### **B) Doğrusal Olmayan Modeller** # ### 5.1) KNN Modeli # #### Modelin Oluşturulması knn_model = KNeighborsRegressor().fit(X_train, y_train) print("En Yakın Komşu Sayısı: ", knn_model.n_neighbors) # #### Tahmin y_pred = knn_model.predict(X_train) y_pred_t = knn_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) # #### Model Doğrulama knn = KNeighborsRegressor() knn_params = {"n_neighbors": np.arange(1, 50, 1)} knn_cv_model = GridSearchCV(knn, knn_params, cv=10) knn_cv_model.fit(X_train, y_train) knn_tuned = KNeighborsRegressor(n_neighbors=knn_cv_model.best_params_["n_neighbors"]) knn_tuned.fit(X_train, y_train) np.sqrt(mean_squared_error(y_test, knn_tuned.predict(X_test))) # #### Grafik # ### 5.2) SVR (Destek Vektör Regresyonu) # #### Modelin Oluşturulması svr_model = SVR("linear").fit(X_train, y_train) print("Sabit Katsayı: ", svr_model.intercept_) print("Değişken Katsayılar: ", svr_model.coef_) # #### Tahmin y_pred = svr_model.predict(X_train) y_pred_t = svr_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) # #### Model Doğrulama svr_params = {"C": np.arange(1, 20, 1)} svr_cv_model = GridSearchCV(svr_model, svr_params, cv=10) svr_cv_model.fit(X_train, y_train) svr_tuned = SVR("linear", C=pd.Series(svr_cv_model.best_params_)) svr_tuned.fit(X_train, y_train) np.sqrt(mean_squared_error(y_test, svr_tuned.predict(X_test))) # #### Grafik # ### 5.3) Doğrusal Olmayan SVR Modeli # #### Modelin Oluşturulması svr_model_d = SVR("rbf").fit(X_train, y_train) print("Sabit Katsayı: ", svr_model_d.intercept_) y_pred = svr_model_d.predict(X_train) y_pred_t = svr_model_d.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", 
np.sqrt(mean_squared_error(y_test, y_pred_t))) svr_params = {"C": np.arange(1, 20, 1)} svr_cv_model_d = GridSearchCV(svr_model_d, svr_params, cv=10) svr_cv_model_d.fit(X_train, y_train) svr_tuned = SVR("rbf", C=pd.Series(svr_cv_model_d.best_params_)) svr_tuned.fit(X_train, y_train) np.sqrt(mean_squared_error(y_test, svr_tuned.predict(X_test))) # #### Grafik # ### 5.4) Çok Katmanlı Algılayıcı Modeli mlp_model = MLPRegressor(hidden_layer_sizes=(100, 20)).fit(X_train, y_train) y_pred = mlp_model.predict(X_train) y_pred_t = mlp_model.predict(X_test) print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred))) print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t))) mlp_params = { "alpha": [10, 20, 30, 40, 50], "hidden_layer_sizes": (20, 20), "activation": ["relu", "logistic"], } mlp_cv_model = GridSearchCV(mlp_model, mlp_params, cv=10) mlp_cv_model.fit(X_train, y_train) mlp_tuned = MLPRegressor( alpha=mlp_cv_model.best_params_["alpha"], hidden_layer_sizes=mlp_cv_model.best_params_["hidden_layer_sizes"], activation=mlp_cv_model.best_params_["activation"], ).fit(X_train, y_train) np.sqrt(mean_squared_error(y_test, mlp_tuned.predict(X_test)))
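# The index above also lists a CART model (5.5), but the script stops after the MLP section. A minimal
# sketch of how that last section could look, assuming the X_train/X_test/y_train/y_test split and the
# imports (tree, GridSearchCV, mean_squared_error, np) already defined above; the parameter grid and
# variable names below are illustrative, not the author's original code.
# #### Modelin Oluşturulması
cart_model = tree.DecisionTreeRegressor().fit(X_train, y_train)
# #### Tahmin
y_pred = cart_model.predict(X_train)
y_pred_t = cart_model.predict(X_test)
print("Eğitim Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_train, y_pred)))
print("Test Seti Hata Katsayısı: ", np.sqrt(mean_squared_error(y_test, y_pred_t)))
# #### Model Doğrulama (illustrative grid, tuned the same way as the KNN and SVR sections)
cart_params = {"max_depth": np.arange(1, 11), "min_samples_split": np.arange(2, 50, 5)}
cart_cv_model = GridSearchCV(cart_model, cart_params, cv=10).fit(X_train, y_train)
cart_tuned = tree.DecisionTreeRegressor(
    max_depth=cart_cv_model.best_params_["max_depth"],
    min_samples_split=cart_cv_model.best_params_["min_samples_split"],
).fit(X_train, y_train)
print(np.sqrt(mean_squared_error(y_test, cart_tuned.predict(X_test))))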
[{"hitters/hitters.csv": {"column_names": "[\"AtBat\", \"Hits\", \"HmRun\", \"Runs\", \"RBI\", \"Walks\", \"Years\", \"CAtBat\", \"CHits\", \"CHmRun\", \"CRuns\", \"CRBI\", \"CWalks\", \"League\", \"Division\", \"PutOuts\", \"Assists\", \"Errors\", \"Salary\", \"NewLeague\"]", "column_data_types": "{\"AtBat\": \"int64\", \"Hits\": \"int64\", \"HmRun\": \"int64\", \"Runs\": \"int64\", \"RBI\": \"int64\", \"Walks\": \"int64\", \"Years\": \"int64\", \"CAtBat\": \"int64\", \"CHits\": \"int64\", \"CHmRun\": \"int64\", \"CRuns\": \"int64\", \"CRBI\": \"int64\", \"CWalks\": \"int64\", \"League\": \"object\", \"Division\": \"object\", \"PutOuts\": \"int64\", \"Assists\": \"int64\", \"Errors\": \"int64\", \"Salary\": \"float64\", \"NewLeague\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 322 entries, 0 to 321\nData columns (total 20 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 AtBat 322 non-null int64 \n 1 Hits 322 non-null int64 \n 2 HmRun 322 non-null int64 \n 3 Runs 322 non-null int64 \n 4 RBI 322 non-null int64 \n 5 Walks 322 non-null int64 \n 6 Years 322 non-null int64 \n 7 CAtBat 322 non-null int64 \n 8 CHits 322 non-null int64 \n 9 CHmRun 322 non-null int64 \n 10 CRuns 322 non-null int64 \n 11 CRBI 322 non-null int64 \n 12 CWalks 322 non-null int64 \n 13 League 322 non-null object \n 14 Division 322 non-null object \n 15 PutOuts 322 non-null int64 \n 16 Assists 322 non-null int64 \n 17 Errors 322 non-null int64 \n 18 Salary 263 non-null float64\n 19 NewLeague 322 non-null object \ndtypes: float64(1), int64(16), object(3)\nmemory usage: 50.4+ KB\n", "summary": "{\"AtBat\": {\"count\": 322.0, \"mean\": 380.92857142857144, \"std\": 153.40498147064488, \"min\": 16.0, \"25%\": 255.25, \"50%\": 379.5, \"75%\": 512.0, \"max\": 687.0}, \"Hits\": {\"count\": 322.0, \"mean\": 101.0248447204969, \"std\": 46.454741356766796, \"min\": 1.0, \"25%\": 64.0, \"50%\": 96.0, \"75%\": 137.0, \"max\": 238.0}, \"HmRun\": {\"count\": 322.0, \"mean\": 10.770186335403727, \"std\": 8.709037413827737, \"min\": 0.0, \"25%\": 4.0, \"50%\": 8.0, \"75%\": 16.0, \"max\": 40.0}, \"Runs\": {\"count\": 322.0, \"mean\": 50.909937888198755, \"std\": 26.02409548457972, \"min\": 0.0, \"25%\": 30.25, \"50%\": 48.0, \"75%\": 69.0, \"max\": 130.0}, \"RBI\": {\"count\": 322.0, \"mean\": 48.02795031055901, \"std\": 26.166894761424544, \"min\": 0.0, \"25%\": 28.0, \"50%\": 44.0, \"75%\": 64.75, \"max\": 121.0}, \"Walks\": {\"count\": 322.0, \"mean\": 38.74223602484472, \"std\": 21.63932655032488, \"min\": 0.0, \"25%\": 22.0, \"50%\": 35.0, \"75%\": 53.0, \"max\": 105.0}, \"Years\": {\"count\": 322.0, \"mean\": 7.444099378881988, \"std\": 4.926087269904596, \"min\": 1.0, \"25%\": 4.0, \"50%\": 6.0, \"75%\": 11.0, \"max\": 24.0}, \"CAtBat\": {\"count\": 322.0, \"mean\": 2648.6832298136646, \"std\": 2324.205870266538, \"min\": 19.0, \"25%\": 816.75, \"50%\": 1928.0, \"75%\": 3924.25, \"max\": 14053.0}, \"CHits\": {\"count\": 322.0, \"mean\": 717.5714285714286, \"std\": 654.4726274762833, \"min\": 4.0, \"25%\": 209.0, \"50%\": 508.0, \"75%\": 1059.25, \"max\": 4256.0}, \"CHmRun\": {\"count\": 322.0, \"mean\": 69.49068322981367, \"std\": 86.26606080180498, \"min\": 0.0, \"25%\": 14.0, \"50%\": 37.5, \"75%\": 90.0, \"max\": 548.0}, \"CRuns\": {\"count\": 322.0, \"mean\": 358.7950310559006, \"std\": 334.10588576614686, \"min\": 1.0, \"25%\": 100.25, \"50%\": 247.0, \"75%\": 526.25, \"max\": 2165.0}, \"CRBI\": {\"count\": 322.0, \"mean\": 330.11801242236027, \"std\": 
333.2196169682779, \"min\": 0.0, \"25%\": 88.75, \"50%\": 220.5, \"75%\": 426.25, \"max\": 1659.0}, \"CWalks\": {\"count\": 322.0, \"mean\": 260.2391304347826, \"std\": 267.05808454363216, \"min\": 0.0, \"25%\": 67.25, \"50%\": 170.5, \"75%\": 339.25, \"max\": 1566.0}, \"PutOuts\": {\"count\": 322.0, \"mean\": 288.9378881987578, \"std\": 280.70461385993525, \"min\": 0.0, \"25%\": 109.25, \"50%\": 212.0, \"75%\": 325.0, \"max\": 1378.0}, \"Assists\": {\"count\": 322.0, \"mean\": 106.91304347826087, \"std\": 136.85487646596755, \"min\": 0.0, \"25%\": 7.0, \"50%\": 39.5, \"75%\": 166.0, \"max\": 492.0}, \"Errors\": {\"count\": 322.0, \"mean\": 8.040372670807454, \"std\": 6.368359079737258, \"min\": 0.0, \"25%\": 3.0, \"50%\": 6.0, \"75%\": 11.0, \"max\": 32.0}, \"Salary\": {\"count\": 263.0, \"mean\": 535.9258821292775, \"std\": 451.11868070253865, \"min\": 67.5, \"25%\": 190.0, \"50%\": 425.0, \"75%\": 750.0, \"max\": 2460.0}}", "examples": "{\"AtBat\":{\"0\":293,\"1\":315,\"2\":479,\"3\":496},\"Hits\":{\"0\":66,\"1\":81,\"2\":130,\"3\":141},\"HmRun\":{\"0\":1,\"1\":7,\"2\":18,\"3\":20},\"Runs\":{\"0\":30,\"1\":24,\"2\":66,\"3\":65},\"RBI\":{\"0\":29,\"1\":38,\"2\":72,\"3\":78},\"Walks\":{\"0\":14,\"1\":39,\"2\":76,\"3\":37},\"Years\":{\"0\":1,\"1\":14,\"2\":3,\"3\":11},\"CAtBat\":{\"0\":293,\"1\":3449,\"2\":1624,\"3\":5628},\"CHits\":{\"0\":66,\"1\":835,\"2\":457,\"3\":1575},\"CHmRun\":{\"0\":1,\"1\":69,\"2\":63,\"3\":225},\"CRuns\":{\"0\":30,\"1\":321,\"2\":224,\"3\":828},\"CRBI\":{\"0\":29,\"1\":414,\"2\":266,\"3\":838},\"CWalks\":{\"0\":14,\"1\":375,\"2\":263,\"3\":354},\"League\":{\"0\":\"A\",\"1\":\"N\",\"2\":\"A\",\"3\":\"N\"},\"Division\":{\"0\":\"E\",\"1\":\"W\",\"2\":\"W\",\"3\":\"E\"},\"PutOuts\":{\"0\":446,\"1\":632,\"2\":880,\"3\":200},\"Assists\":{\"0\":33,\"1\":43,\"2\":82,\"3\":11},\"Errors\":{\"0\":20,\"1\":10,\"2\":14,\"3\":3},\"Salary\":{\"0\":null,\"1\":475.0,\"2\":480.0,\"3\":500.0},\"NewLeague\":{\"0\":\"A\",\"1\":\"N\",\"2\":\"A\",\"3\":\"N\"}}"}}]
true
1
<start_data_description><data_path>hitters/hitters.csv: <column_names> ['AtBat', 'Hits', 'HmRun', 'Runs', 'RBI', 'Walks', 'Years', 'CAtBat', 'CHits', 'CHmRun', 'CRuns', 'CRBI', 'CWalks', 'League', 'Division', 'PutOuts', 'Assists', 'Errors', 'Salary', 'NewLeague'] <column_types> {'AtBat': 'int64', 'Hits': 'int64', 'HmRun': 'int64', 'Runs': 'int64', 'RBI': 'int64', 'Walks': 'int64', 'Years': 'int64', 'CAtBat': 'int64', 'CHits': 'int64', 'CHmRun': 'int64', 'CRuns': 'int64', 'CRBI': 'int64', 'CWalks': 'int64', 'League': 'object', 'Division': 'object', 'PutOuts': 'int64', 'Assists': 'int64', 'Errors': 'int64', 'Salary': 'float64', 'NewLeague': 'object'} <dataframe_Summary> {'AtBat': {'count': 322.0, 'mean': 380.92857142857144, 'std': 153.40498147064488, 'min': 16.0, '25%': 255.25, '50%': 379.5, '75%': 512.0, 'max': 687.0}, 'Hits': {'count': 322.0, 'mean': 101.0248447204969, 'std': 46.454741356766796, 'min': 1.0, '25%': 64.0, '50%': 96.0, '75%': 137.0, 'max': 238.0}, 'HmRun': {'count': 322.0, 'mean': 10.770186335403727, 'std': 8.709037413827737, 'min': 0.0, '25%': 4.0, '50%': 8.0, '75%': 16.0, 'max': 40.0}, 'Runs': {'count': 322.0, 'mean': 50.909937888198755, 'std': 26.02409548457972, 'min': 0.0, '25%': 30.25, '50%': 48.0, '75%': 69.0, 'max': 130.0}, 'RBI': {'count': 322.0, 'mean': 48.02795031055901, 'std': 26.166894761424544, 'min': 0.0, '25%': 28.0, '50%': 44.0, '75%': 64.75, 'max': 121.0}, 'Walks': {'count': 322.0, 'mean': 38.74223602484472, 'std': 21.63932655032488, 'min': 0.0, '25%': 22.0, '50%': 35.0, '75%': 53.0, 'max': 105.0}, 'Years': {'count': 322.0, 'mean': 7.444099378881988, 'std': 4.926087269904596, 'min': 1.0, '25%': 4.0, '50%': 6.0, '75%': 11.0, 'max': 24.0}, 'CAtBat': {'count': 322.0, 'mean': 2648.6832298136646, 'std': 2324.205870266538, 'min': 19.0, '25%': 816.75, '50%': 1928.0, '75%': 3924.25, 'max': 14053.0}, 'CHits': {'count': 322.0, 'mean': 717.5714285714286, 'std': 654.4726274762833, 'min': 4.0, '25%': 209.0, '50%': 508.0, '75%': 1059.25, 'max': 4256.0}, 'CHmRun': {'count': 322.0, 'mean': 69.49068322981367, 'std': 86.26606080180498, 'min': 0.0, '25%': 14.0, '50%': 37.5, '75%': 90.0, 'max': 548.0}, 'CRuns': {'count': 322.0, 'mean': 358.7950310559006, 'std': 334.10588576614686, 'min': 1.0, '25%': 100.25, '50%': 247.0, '75%': 526.25, 'max': 2165.0}, 'CRBI': {'count': 322.0, 'mean': 330.11801242236027, 'std': 333.2196169682779, 'min': 0.0, '25%': 88.75, '50%': 220.5, '75%': 426.25, 'max': 1659.0}, 'CWalks': {'count': 322.0, 'mean': 260.2391304347826, 'std': 267.05808454363216, 'min': 0.0, '25%': 67.25, '50%': 170.5, '75%': 339.25, 'max': 1566.0}, 'PutOuts': {'count': 322.0, 'mean': 288.9378881987578, 'std': 280.70461385993525, 'min': 0.0, '25%': 109.25, '50%': 212.0, '75%': 325.0, 'max': 1378.0}, 'Assists': {'count': 322.0, 'mean': 106.91304347826087, 'std': 136.85487646596755, 'min': 0.0, '25%': 7.0, '50%': 39.5, '75%': 166.0, 'max': 492.0}, 'Errors': {'count': 322.0, 'mean': 8.040372670807454, 'std': 6.368359079737258, 'min': 0.0, '25%': 3.0, '50%': 6.0, '75%': 11.0, 'max': 32.0}, 'Salary': {'count': 263.0, 'mean': 535.9258821292775, 'std': 451.11868070253865, 'min': 67.5, '25%': 190.0, '50%': 425.0, '75%': 750.0, 'max': 2460.0}} <dataframe_info> RangeIndex: 322 entries, 0 to 321 Data columns (total 20 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 AtBat 322 non-null int64 1 Hits 322 non-null int64 2 HmRun 322 non-null int64 3 Runs 322 non-null int64 4 RBI 322 non-null int64 5 Walks 322 non-null int64 6 Years 322 non-null int64 7 CAtBat 322 non-null 
int64 8 CHits 322 non-null int64 9 CHmRun 322 non-null int64 10 CRuns 322 non-null int64 11 CRBI 322 non-null int64 12 CWalks 322 non-null int64 13 League 322 non-null object 14 Division 322 non-null object 15 PutOuts 322 non-null int64 16 Assists 322 non-null int64 17 Errors 322 non-null int64 18 Salary 263 non-null float64 19 NewLeague 322 non-null object dtypes: float64(1), int64(16), object(3) memory usage: 50.4+ KB <some_examples> {'AtBat': {'0': 293, '1': 315, '2': 479, '3': 496}, 'Hits': {'0': 66, '1': 81, '2': 130, '3': 141}, 'HmRun': {'0': 1, '1': 7, '2': 18, '3': 20}, 'Runs': {'0': 30, '1': 24, '2': 66, '3': 65}, 'RBI': {'0': 29, '1': 38, '2': 72, '3': 78}, 'Walks': {'0': 14, '1': 39, '2': 76, '3': 37}, 'Years': {'0': 1, '1': 14, '2': 3, '3': 11}, 'CAtBat': {'0': 293, '1': 3449, '2': 1624, '3': 5628}, 'CHits': {'0': 66, '1': 835, '2': 457, '3': 1575}, 'CHmRun': {'0': 1, '1': 69, '2': 63, '3': 225}, 'CRuns': {'0': 30, '1': 321, '2': 224, '3': 828}, 'CRBI': {'0': 29, '1': 414, '2': 266, '3': 838}, 'CWalks': {'0': 14, '1': 375, '2': 263, '3': 354}, 'League': {'0': 'A', '1': 'N', '2': 'A', '3': 'N'}, 'Division': {'0': 'E', '1': 'W', '2': 'W', '3': 'E'}, 'PutOuts': {'0': 446, '1': 632, '2': 880, '3': 200}, 'Assists': {'0': 33, '1': 43, '2': 82, '3': 11}, 'Errors': {'0': 20, '1': 10, '2': 14, '3': 3}, 'Salary': {'0': None, '1': 475.0, '2': 480.0, '3': 500.0}, 'NewLeague': {'0': 'A', '1': 'N', '2': 'A', '3': 'N'}} <end_description>
6,534
0
8,343
6,534
69775642
import pandas as pd from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn import tree import matplotlib.pyplot as plt from sklearn.cluster import KMeans import seaborn as sns import numpy as np df1 = pd.read_csv("../input/userbk2021/20210705_latest_tx_volume_fee.csv") df2 = pd.read_csv("../input/userbk2021/20210705_latest_tx.csv") df3 = pd.read_csv("../input/userbk2021/20210705_users.csv") # merge 3 df together df = pd.merge(pd.merge(df1, df2, on="user_id"), df3, on="user_id") df cols = [ "volume", "max(T1.created_at)", "last_login", "gender", "age", "range_income", "KYC Tier", "Nationality", ] df = pd.DataFrame(df, columns=cols) df.head(5) # # Data Cleaning # # check standard missing values print(df.isna().sum().sort_values(ascending=False)) # find duplicate df[df.duplicated()] # Customizing Missing Data Values df["age"] = df["age"].mask(df["age"] < 17) df.dropna(inplace=True) # # Data Transformation # convert object features into numerical # from sklearn.preprocessing import OrdinalEncoder encoder = OrdinalEncoder() df["Gender_Code"] = encoder.fit_transform(df[["gender"]]) print(df["gender"].value_counts()) df.head() df["last_login"] = pd.to_datetime(df["last_login"]) df["last_login"].head() df["max(T1.created_at)"] = pd.to_datetime(df["max(T1.created_at)"]) df["max(T1.created_at)"].head() df["age"] = pd.to_numeric(df["age"]) def age_range(x): if x > 17 and x < 30: x = 0 elif x >= 30 and x < 40: x = 1 elif x >= 40 and x < 50: x = 2 elif x >= 50 and x < 60: x = 3 elif x >= 60: x = 4 else: x = 9 return x df["age"] = df["age"].apply(lambda x: age_range(x)) def age_range_str(x): if x == "0": x = "18-29" elif x == "1": x = "30-39" elif x == "2": x = "40-49" elif x == "3": x = "50-59" elif x == "4": x = "60 above" else: x = "others" return x df["age"] = df["age"].astype(str) df["age"] = df["age"].apply(lambda x: age_range_str(x)) df["age"] nn = df["Nationality"].value_counts() nn.to_csv("nn.csv") nn.head(10) def select_countries(x): if x == "Thailand": x = "Thai" elif x == "United Kingdom": x = "UK" elif x == "Laos": x = "Laos" elif x == "Japan": x = "Japan" elif x == "Russian Federation": x = "Russian" elif x == "India": x = "India" elif x == "France": x = "France" elif x == "South Korea": x = "South Korea" else: x = "Others" return x df["Nationality"] = df["Nationality"].astype(str) df["Nationality"] = df["Nationality"].apply(lambda x: select_countries(x)) df["Nationality"].value_counts() def select_income(x): if ( x == "0 - 15,000 THB" or x == "400 - 800 USD" or x == "500-999 USD" or x == "1000-1499 USD" or x == "15000-29999 THB" or x == "30000-49999 THB" or x == "15,001 - 30,000 THB" or x == "30,001 - 50,000 THB" or x == "801 - 1,500 USD" or x == "0-14999 THB" ): x = "<50,000 THB" elif ( x == "50000 - 99999 THB" or x == "50,001 - 100,000 THB" or x == "1500-2999 USD" or x == "1,501 - 2,800 USD " or x == "50000 - 99999 THB" or x == "1500-2999 USD " or x == "1,501 - 2,800 USD" ): x = "50,000 - 100,000 THB" elif ( x == "100000-199999 THB" or x == "200000-399999 THB" or x == "3000-5999 USD" or x == "1500-2999 USD" or x == "400,001 - 700,000 THB" or x == "2,801 - 5,600 USD" or x == "5,601 - 11,200 USD" or x == "100,001 - 200,000 THB" or x == "6000-11999 USD" or x == "12000-19999 USD" or x == "200,001 - 400,000 THB" or x == "400000-699999 THB" ): x = "100,000 - 700,000 THB" elif ( x == "1000000-1999999 THB" or x == "700000-999999 THB" or x == "30000-59999 USD" or x == "20000-29999 USD" or x == "1,000,001 - 2,000,000 THB" or x == 
"700,001 - 1,000,000 THB" ): x = "700,000 - 2M THB" elif ( x == "มากกว่า 2,000,000 บาท" or x == "60000 USD or above" or x == "More than 2,000,000 THB" ): x = "> 2M" # else: # x = "Others" return x df["range_income"] = df["range_income"].astype(str) df["range_income"] = df["range_income"].apply(lambda x: select_income(x)) df["range_income"] df["range_income"] # # SELECT Low Volume low_vol = df["volume"].quantile(q=0.25) # target + volume y = df["volume"].apply(lambda x: 0 if x <= low_vol else 1) # 1 for high, 0 for low df["volume_level"] = y df.head(5) grouped = df.groupby("volume_level") # dfLow_column=['user_id','volume','max(T1.created_at)','last_login','Gender_Code','range_income','KYC Tier','Natoinality','volume_level'] dfLow_column = [ "Gender_Code", "age", "range_income", "KYC Tier", "Nationality", "volume_level", ] dfLow = pd.DataFrame(grouped.get_group(0), columns=dfLow_column) dfLow.head() # # Clustering¶ # dfVolume_column = [ "volume", "Gender_Code", "age", "range_income", "KYC Tier", "Nationality", "volume_level", ] dfVol = pd.DataFrame(grouped.get_group(0), columns=dfVolume_column) dfVol.head() from sklearn.preprocessing import MinMaxScaler Low = pd.get_dummies(dfLow) Low = MinMaxScaler().fit_transform(Low) dfLowSC_column = ["Gender_Code", "age", "range_income", "KYC Tier", "Nationality"] dfLowSC = pd.DataFrame(grouped.get_group(0), columns=dfLowSC_column) LowSC = pd.get_dummies(dfLowSC) LowSC = MinMaxScaler().fit_transform(LowSC) km = KMeans(n_clusters=15) clust = km.fit_predict(Low) dfLow["Cluster"] = clust dfLow # # EDA def plot_crosstab(df, col1, col2): churn_crosstab = pd.crosstab(df.iloc[:, col1], df.iloc[:, col2], normalize=True) churn_crosstab.plot(kind="bar", grid=True) plt.show() # Gender plot_crosstab(dfLow, 6, 0) # age plot_crosstab(dfLow, 6, 1) # range_income plot_crosstab(dfLow, 6, 2) # kyc tier plot_crosstab(dfLow, 6, 3) # Nationality plot_crosstab(dfLow, 6, 4) for k in range(2, 20): model = KMeans(n_clusters=k, random_state=1).fit(LowSC) labels = model.labels_ score = metrics.silhouette_score(Low, labels, metric="euclidean") print(k, "Score: %.2f" % score) from yellowbrick.cluster import SilhouetteVisualizer for k in range(5, 15): model = KMeans(k, random_state=1) visualizer = SilhouetteVisualizer(model, colors="yellowbrick") visualizer.fit(Low) visualizer.show() # # EDA by volume # Not by clustering grand_total_vol = dfVol["volume"].sum() grand_total_vol def select_group_vol(groupped, cata, col): df_vol = pd.DataFrame(groupped.get_group(cata), columns=["volume", "col"]) df_vol["TolVol"] = df_vol["volume"].sum() Tol_vol_sub = df_vol["volume"].sum() return Tol_vol_sub Nation = dfVol.groupby("Nationality") ThVol = pd.DataFrame(Nation.get_group("Thai"), columns=["volume", "Nationality"]) ThVol["TolVol"] = ThVol["volume"].sum() TolVolTh = ThVol["volume"].sum() TolVolTh LaVol = pd.DataFrame(Nation.get_group("Laos"), columns=["volume", "Nationality"]) LaVol["TolVol"] = LaVol["volume"].sum() TolVolLa = LaVol["volume"].sum() TolVolLa JpVol = pd.DataFrame(Nation.get_group("Japan"), columns=["volume", "Nationality"]) JpVol["TolVol"] = JpVol["volume"].sum() TolVolJp = JpVol["volume"].sum() TolVolJp UKVol = pd.DataFrame(Nation.get_group("UK"), columns=["volume", "Nationality"]) UKVol["TolVol"] = UKVol["volume"].sum() TolVolUK = UKVol["volume"].sum() TolVolUK TolVolRu = select_group_vol(Nation, "Russian", "Nationality") TolVolRu TolVolIn = select_group_vol(Nation, "India", "Nationality") TolVolIn TolVolFr = select_group_vol(Nation, "France", "Nationality") TolVolFr TolVolSK 
= select_group_vol(Nation, "South Korea", "Nationality") TolVolSK OtVol = pd.DataFrame(Nation.get_group("Others"), columns=["volume", "Nationality"]) OtVol["TolVol"] = OtVol["volume"].sum() TolVolOt = OtVol["volume"].sum() TolVolOt TotalVol_nation = pd.DataFrame( { "Total Volume": [ TolVolTh, TolVolLa, TolVolJp, TolVolUK, TolVolRu, TolVolIn, TolVolFr, TolVolSK, TolVolOt, ], }, index=[ "Thai", "Laos", "Japan", "UK", "Russian", "India", "France", "South Korea", "Others", ], ) TotalVol_nation["percent per nation"] = ( TotalVol_nation["Total Volume"] / TotalVol_nation["Total Volume"].sum() ) * 100 TotalVol_nation dfVol["Nationality"].value_counts() plot = TotalVol_nation.plot.pie(y="Total Volume", figsize=(5, 5)) # **AGE **** group_age_vol = dfVol.groupby("age") TwVol = pd.DataFrame(group_age_vol.get_group("18-29"), columns=["volume", "age"]) TwVol["TolVol"] = TwVol["volume"].sum() To_TwVol = TwVol["volume"].sum() To_TwVol ThiVol = pd.DataFrame(group_age_vol.get_group("30-39"), columns=["volume", "age"]) ThiVol["TolVol"] = ThiVol["volume"].sum() To_ThiVol = ThiVol["volume"].sum() To_ThiVol FoVol = pd.DataFrame(group_age_vol.get_group("40-49"), columns=["volume", "age"]) FoVol["TolVol"] = FoVol["volume"].sum() To_FoVol = FoVol["volume"].sum() To_FoVol FiVol = pd.DataFrame(group_age_vol.get_group("50-59"), columns=["volume", "age"]) FiVol["TolVol"] = FiVol["volume"].sum() To_FiVol = FiVol["volume"].sum() To_FiVol SiVol = pd.DataFrame(group_age_vol.get_group("60 above"), columns=["volume", "age"]) SiVol["TolVol"] = SiVol["volume"].sum() To_SiVol = SiVol["volume"].sum() To_SiVol TotalVol_age = pd.DataFrame( { "Total Volume by age": [To_TwVol, To_ThiVol, To_FoVol, To_FiVol, To_SiVol], }, index=["18-29", "30-39", "40-49", "50-59", "60 and more"], ) TotalVol_age["percent per age group"] = ( TotalVol_age["Total Volume by age"] / grand_total_vol ) * 100 TotalVol_age plot = TotalVol_age.plot.pie(y="Total Volume by age", figsize=(5, 5)) # **Income range** range_Income = dfVol.groupby("range_income") Tol_lowIncomeVol = select_group_vol(range_Income, "<50,000 THB", "range_income") Tol_lowIncomeVol Tol_mid_low_IncomeVol = select_group_vol( range_Income, "50,000 - 100,000 THB", "range_income" ) Tol_mid_low_IncomeVol Tol_mid_IncomeVol = select_group_vol( range_Income, "100,000 - 700,000 THB", "range_income" ) Tol_mid_IncomeVol Tol_high_mid_IncomeVol = select_group_vol( range_Income, "700,000 - 2M THB", "range_income" ) Tol_high_mid_IncomeVol Tol_high_IncomeVol = select_group_vol(range_Income, "> 2M", "range_income") Tol_high_IncomeVol TotalVol_income = pd.DataFrame( { "Total Volume by income range": [ Tol_lowIncomeVol, Tol_mid_low_IncomeVol, Tol_mid_IncomeVol, Tol_high_mid_IncomeVol, Tol_high_IncomeVol, ], }, index=[ "<50,000 THB", "50,000 - 100,000 THB", "100,000 - 700,000 THB", "700,000 - 2M THB", "> 2M", ], ) TotalVol_income["percent per income rabge"] = ( TotalVol_income["Total Volume by income range"] / grand_total_vol ) * 100 TotalVol_income plot = TotalVol_income.plot.pie(y="Total Volume by income range", figsize=(5, 5)) # **KYC Tier** kyc_tier = dfVol.groupby("KYC Tier") Tol_T1_Vol = select_group_vol(kyc_tier, 1, "KYC Tier") Tol_T1_Vol Tol_T2_Vol = select_group_vol(kyc_tier, 2, "KYC Tier") Tol_T2_Vol Tol_T3_Vol = select_group_vol(kyc_tier, 3, "KYC Tier") Tol_T3_Vol TotalVol_KYC = pd.DataFrame( {"Total Volume by KYC Tier": [Tol_T1_Vol, Tol_T2_Vol, Tol_T3_Vol]}, index=["Tier 1", "Tier 2", "Tier 3"], ) TotalVol_KYC["percent per KYC tier"] = ( TotalVol_KYC["Total Volume by KYC Tier"] / grand_total_vol ) * 
100 TotalVol_KYC plot = TotalVol_KYC.plot.pie(y="Total Volume by KYC Tier", figsize=(5, 5)) # Country Cluster # dfCC = pd.DataFrame( dfVol[dfVol["Nationality"] == "Thai"], columns=["Gender_Code", "range_income", "KYC Tier", "age"], ) dfCC CC = pd.get_dummies(dfCC) dfCC dfCC.describe() CC = pd.get_dummies(dfCC) CC = MinMaxScaler().fit_transform(CC) for k in range(2, 25): model = KMeans(n_clusters=k, random_state=1).fit(CC) labels = model.labels_ score = metrics.silhouette_score(CC, labels, metric="euclidean") print(k, "Score: %.2f" % score) km = KMeans(n_clusters=24) clust = km.fit_predict(CC) dfCC["Cluster"] = clust dfCC dfCC["age"].value_counts() dfJ = pd.DataFrame( dfVol[dfVol["Nationality"] == "Thai"], columns=["volume", "KYC Tier", "Gender_Code", "age"], ) ax = dfJ["volume"].plot.hist() sum = dfJ["volume"].sum() sum tierTwo = dfJ[dfJ["KYC Tier"] == 3] tierTwo["volume"].mean() tierOne = dfJ[dfJ["KYC Tier"] == 1] tierOne["volume"].mean() Female = dfJ[dfJ["Gender_Code"] == 0.0] Female["volume"].sum() each = dfJ.groupby(dfJ["age"]).mean() percent = (each / sum) * 100 each dfCC["Cluster"].value_counts() dfJ.plot.scatter(x="KYC Tier", y="volume") dfCC["KYC Tier"].value_counts() plot_crosstab(dfCC, 4, 0) plot_crosstab(dfCC, 4, 2) plot_crosstab(dfCC, 4, 1) plot_crosstab(dfCC, 4, 3) dfCC["Gender_Code"].value_counts() # # EDA 2.0 # Not by clustering dfLow["Nationality"].value_counts() import matplotlib.pyplot as plt values = dfLow["Nationality"].value_counts() colors = ["b", "g", "r", "c", "m", "y", "orange", "lime", "black"] labels = [ "Thai", "Japan", "Loas", "UK", "Russian", "India", "France", "Sonth Korea", "Others", ] explode = (0.2, 0, 0, 0, 0, 0, 0, 0, 0) plt.pie(values, explode=explode, counterclock=False, shadow=True) plt.title("Percenatge of each nationality in low volume group") plt.legend(labels, loc=3) plt.show() dfLow["Nationality"].value_counts().plot( kind="pie", explode=explode, title="Gender of people taking the survey that have income", fontsize=15, autopct="%1.1f%%", pctdistance=5, labeldistance=7, ) plt.show() dfLow["Gender_Code"].value_counts().plot( kind="pie", title="Percentage of male an", fontsize=15, autopct="%1.1f%%", pctdistance=0.5, labeldistance=1.2, ) plt.show() dfLow["KYC Tier"].value_counts().plot( kind="pie", title="Percentage of each KYC Tier in low volume group ", fontsize=15, autopct="%1.1f%%", pctdistance=0.5, labeldistance=1.2, ) plt.show() dfLow["range_income"].value_counts().plot( kind="pie", title="Percentage of each KYC Tier in low volume group ", fontsize=15, autopct="%1.1f%%", pctdistance=2.2, labeldistance=2.5, ) plt.show() # # Cluster analys 2 # **User age around 18-29** group_age = dfLow.groupby("age") dfLow_young = pd.DataFrame( group_age.get_group("18-29"), columns=["Gender_Code", "range_income", "KYC Tier", "Nationality"], ) dfLow_young Low_young = pd.get_dummies(dfLow_young) dfLow_young km = KMeans(n_clusters=9) clust = km.fit_predict(Low_young) dfLow_young["Cluster"] = clust dfLow_young.head() plot_crosstab(dfLow_young, 4, 0) plot_crosstab(dfLow_young, 4, 1) plot_crosstab(dfLow_young, 4, 2) plot_crosstab(dfLow_young, 4, 3) dfLowSC_column_young = ["Gender_Code", "range_income", "KYC Tier", "Nationality"] dfLow_youngSC = pd.DataFrame(group_age.get_group("18-29"), columns=dfLowSC_column_young) Low_youngSC = pd.get_dummies(dfLow_youngSC) Low_youngSC = MinMaxScaler().fit_transform(Low_youngSC) for k in range(2, 10): model = KMeans(n_clusters=k, random_state=1).fit(Low_youngSC) labels = model.labels_ score = metrics.silhouette_score(Low_youngSC, 
labels, metric="euclidean") print(k, "Score: %.2f" % score) dfLow_young["Nationality"].value_counts() # **user age 30-39** dfLow_th = pd.DataFrame( group_age.get_group("30-39"), columns=["Gender_Code", "range_income", "KYC Tier", "Nationality"], ) dfLow_th Low_th = pd.get_dummies(dfLow_th) dfLow_th dfLowSC_column_young = ["Gender_Code", "range_income", "KYC Tier", "Nationality"] dfLow_thSC = pd.DataFrame(group_age.get_group("30-39"), columns=dfLowSC_column_young) Low_thSC = pd.get_dummies(dfLow_thSC) Low_thSC = MinMaxScaler().fit_transform(Low_thSC) for k in range(2, 21): model = KMeans(n_clusters=k, random_state=1).fit(Low_thSC) labels = model.labels_ score = metrics.silhouette_score(Low_thSC, labels, metric="euclidean") print(k, "Score: %.2f" % score) km = KMeans(n_clusters=20) clust = km.fit_predict(Low_th) dfLow_th["Cluster"] = clust dfLow_th plot_crosstab(dfLow_th, 4, 0) plot_crosstab(dfLow_th, 4, 1) plot_crosstab(dfLow_th, 4, 2) plot_crosstab(dfLow_th, 4, 3) dfLow["range_income"].value_counts() # **user age 40-49****** dfLow_fo = pd.DataFrame( group_age.get_group("40-49"), columns=["Gender_Code", "range_income", "KYC Tier", "Nationality"], ) Low_fo = pd.get_dummies(dfLow_fo) dfLow_fo dfLow_foSC = pd.DataFrame(group_age.get_group("40-49"), columns=dfLowSC_column_young) Low_foSC = pd.get_dummies(dfLow_foSC) Low_foSC = MinMaxScaler().fit_transform(Low_foSC) for k in range(2, 21): model = KMeans(n_clusters=k, random_state=1).fit(Low_foSC) labels = model.labels_ score = metrics.silhouette_score(Low_foSC, labels, metric="euclidean") print(k, "Score: %.2f" % score) km = KMeans(n_clusters=19) clust = km.fit_predict(Low_fo) dfLow_fo["Cluster"] = clust dfLow_fo.head(2) plot_crosstab(dfLow_fo, 4, 2) plot_crosstab(dfLow_fo, 4, 0) plot_crosstab(dfLow_fo, 4, 3) plot_crosstab(dfLow_fo, 4, 1) # **age 50-51** dfLow_fi = pd.DataFrame( group_age.get_group("50-59"), columns=["Gender_Code", "range_income", "KYC Tier", "Nationality"], ) Low_fi = pd.get_dummies(dfLow_fi) dfLow_fi dfLow_fiSC = pd.DataFrame(group_age.get_group("50-59"), columns=dfLowSC_column_young) Low_fiSC = pd.get_dummies(dfLow_fiSC) Low_fiSC = MinMaxScaler().fit_transform(Low_fiSC) for k in range(2, 26): model = KMeans(n_clusters=k, random_state=1).fit(Low_fiSC) labels = model.labels_ score = metrics.silhouette_score(Low_fiSC, labels, metric="euclidean") print(k, "Score: %.2f" % score) km = km = KMeans(n_clusters=19) clust = km.fit_predict(Low_fi) dfLow_fi["Cluster"] = clust dfLow_fi plot_crosstab(dfLow_fi, 4, 2) plot_crosstab(dfLow_fi, 4, 0) plot_crosstab(dfLow_fi, 4, 3) plot_crosstab(dfLow_fi, 4, 1) # **age 60 and above** dfLow_si = pd.DataFrame( group_age.get_group("60 above"), columns=["Gender_Code", "range_income", "KYC Tier", "Nationality"], ) Low_si = pd.get_dummies(dfLow_si) dfLow_si slt_df = dfLow_si[dfLow_si["range_income"] == "<50,000 THB"] dfLow["Nationality"].value_counts() dfLow_siSC = pd.DataFrame(group_age.get_group("60 above"), columns=dfLowSC_column_young) Low_siSC = pd.get_dummies(dfLow_siSC) Low_siSC = MinMaxScaler().fit_transform(Low_siSC) for k in range(2, 16): model = KMeans(n_clusters=k, random_state=1).fit(Low_siSC) labels = model.labels_ score = metrics.silhouette_score(Low_siSC, labels, metric="euclidean") print(k, "Score: %.2f" % score) km = km = KMeans(n_clusters=12) clust = km.fit_predict(Low_si) dfLow_si["Cluster"] = clust dfLow_si.head(2) plot_crosstab(dfLow_si, 4, 2) plot_crosstab(dfLow_si, 4, 0) plot_crosstab(dfLow_si, 4, 3) plot_crosstab(dfLow_si, 4, 1)
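# The k-selection loop above is repeated for LowSC, CC, Low_youngSC, Low_thSC, Low_foSC, Low_fiSC and
# Low_siSC, and the first of those loops fits KMeans on LowSC while passing Low to silhouette_score.
# A small helper, sketched here as a possible refactor (the function name and default range are
# illustrative, not part of the original notebook), keeps the fit and the score on the same matrix.
def silhouette_scan(X, k_range=range(2, 20), random_state=1):
    # Fit KMeans for each k and score the clustering on the same matrix it was fit on
    scores = {}
    for k in k_range:
        labels = KMeans(n_clusters=k, random_state=random_state).fit_predict(X)
        scores[k] = metrics.silhouette_score(X, labels, metric="euclidean")
        print(k, "Score: %.2f" % scores[k])
    return scores
# Usage, e.g.: silhouette_scan(LowSC) or silhouette_scan(Low_thSC, range(2, 21))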
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/775/69775642.ipynb
null
null
[{"Id": 69775642, "ScriptId": 18398544, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5915792, "CreationDate": "08/03/2021 07:19:44", "VersionNumber": 4.0, "Title": "BK 06-07-21", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 714.0, "LinesInsertedFromPrevious": 64.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 650.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
7,733
0
7,733
7,733
69775643
from PIL import Image from PIL import TiffTags import numpy as np import pandas as pd import matplotlib.pyplot as plt import pathlib from pathlib import Path from tqdm import tqdm import time import cv2 import logging import os import sys import tempfile from glob import glob def get_pixelsize(filename, size=100): img = Image.open(filename) img_array = np.array(img.convert("RGB")) x = [] count = [] for i in range(img_array.shape[0]): num = 0 for k in range(img_array.shape[1]): c = img_array[i, k] if c[0] > 240 and c[1] < 50 and c[2] < 50: # 赤色の部分だけ取ってくる。 num += 1 x.append(i + 1) count.append(num) t = max(count) pixelsize = t / size print("{} μmあたり{} 個のピクセルがあります。".format(size, t)) print("ピクセル1個あたり{} μmです。".format(pixelsize)) return pixelsize def preprocess(img, th=150, complement=200): # th:閾値、complement:閾値を変換させたときの濃度(0~255) img_bitwise = cv2.bitwise_not(img) img_gray = cv2.cvtColor(img_bitwise, cv2.COLOR_BGR2GRAY) ret, img_binary = cv2.threshold(img_gray, th, 255, cv2.THRESH_BINARY) external_contours = np.zeros(img.shape) contours, hierarchy = cv2.findContours( img_binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE ) img_contour = cv2.drawContours(external_contours, contours, -1, (255, 255, 255), -1) bool_array = img_contour != 0 img_new = img.copy() img_new[bool_array] = complement return img_new def countur_detection( img_origin, th1=120, th2=250, color=(0, 255, 0) ): # th1:閾値 th2:閾値を変換させたときの濃度(0~255) img = cv2.bitwise_not(img_origin) img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) ret, img_binary = cv2.threshold(img_gray, th1, th2, cv2.THRESH_BINARY) img_origin_copy = img_origin.copy() contours, hierarchy = cv2.findContours( img_binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE ) img_contour = cv2.drawContours(img_origin_copy, contours, -1, color, 5) return contours, hierarchy, img_contour def output_contour( image, contours, th=1000, con_color=(0, 255, 0), rec_color=(255, 0, 0) ): id_list = [] area_list = [] img_copy = image.copy() for i in range(len(contours)): if (cv2.contourArea(contours[i])) > th: # 変な輪郭は拾わないようにする。 ax = contours[i] countour_x = 0 countour_y = 0 for k in range(ax.shape[0]): x = ax[k, 0, 0] y = ax[k, 0, 1] if x == 0 or x == img_copy.shape[1] - 1: countour_x += 1 if y == 0 or y == img_copy.shape[0] - 1: countour_y += 1 if countour_x > 5 or countour_y > 5: pass else: id_list.append(i) area_list.append(cv2.contourArea(contours[i])) retval_list = [] for i in range(len(id_list)): im_con = image.copy() im_con2 = cv2.drawContours(im_con, contours, id_list[i], con_color, 5) print("ID", id_list[i], "Area", cv2.contourArea(contours[id_list[i]])) x, y, w, h = cv2.boundingRect(contours[id_list[i]]) retval_list.append([x, y, w, h]) new_im = cv2.rectangle(im_con2, (x, y), (x + w, y + h), rec_color, cv2.LINE_4) plt.imshow(new_im) plt.show() print("出力が完了しました。") # 入力画面が出ない場合は下の出力画面を消して、再実行すること。 return id_list, area_list, retval_list path_list = [] path = "../input/mnes-muscle-cell-area/Image" for i in range(len(os.listdir(path))): path_list.append(path + "/img" + str(i + 1) + ".tiff") print(path_list) df_list = [] for file in path_list: z = get_pixelsize(file) img = Image.open(file) img_array = np.array(img.convert("RGB")) r_img = img_array[:, :, 0] g_img = img_array[:, :, 1] b_img = img_array[:, :, 2] r_thresh, r_bin = cv2.threshold( r_img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU ) r_bin = cv2.bitwise_not(r_bin) g_thresh, g_bin = cv2.threshold( g_img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU ) b_thresh, b_bin = cv2.threshold( b_img, 0, 255, cv2.THRESH_BINARY_INV + 
cv2.THRESH_OTSU ) img = (r_bin / 255) * ((g_bin / 255) + (b_bin / 255)) new_img = np.where(img > 1, 1, img) a = np.zeros((new_img.shape[0], new_img.shape[1], 3)) b = new_img.reshape(new_img.shape[0], new_img.shape[1], 1) c = a + b d = c * 255 bool_array = d == 0 img_new = img_array.copy() img_new[bool_array] = 255 img_origin = cv2.cvtColor(img_new, cv2.COLOR_RGB2BGR) img_new = preprocess(img_origin, 145) contours2, hi, img_contour = countur_detection(img_new, 120, 250) id_list, area_list, retval_list = output_contour(img_new, contours2) x = len(id_list) df = pd.DataFrame(np.arange(2 * x).reshape(x, 2), columns=["id", "area_pixel"]) df.id = id_list df.area_pixel = area_list df["area"] = df.area_pixel * (z**2) df["imagelabel"] = file df_list.append(df) df_sample = pd.read_csv("../input/mnes-muscle-cell-area/sample_submission.csv") df_sample_dict = {} a = 0 b = 1 for i in range(len(df_sample)): t = df_sample.ID.loc[i] u = t.find("g") w = t.find("-") n = int(t[u + 1 : w]) if n == b: pass else: df = df_sample[a:i] df_sample_dict[b] = df a = i b = n df_sample_dict[b] = df_sample[a:] for i in range(len(df_list)): try: t = df_list[i].imagelabel.loc[0] print(t) u = t.rfind("g") w = t.rfind(".") l = int(t[u + 1 : w]) df_s = df_sample_dict[l] x = len(df_list[i]) y = len(df_s) area = (df_list[i].area / 1000).to_list() area.sort() if x <= y: n = area[-1] area.extend([n for i in range(y - x)]) else: area = area[:y] df_s.AREA = area df_sample_dict[l] = df_s except KeyError: print("データがありません。") df_predict_list = [] for i in range(len(df_sample_dict)): df_predict_list.append(df_sample_dict[i + 1]) df_predict = pd.concat(df_predict_list) df_predict df_predict.to_csv("benchmark.csv", index=False, encoding="utf-8")
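# A condensed, hedged sketch of the measurement step used above: binarize the
# cleaned image, keep sufficiently large external contours, and convert pixel
# areas to physical units with the calibrated pixel size. The function name,
# threshold, and minimum-area cut-off are illustrative assumptions, and the
# sketch omits the border-contact filtering done in output_contour above.
import cv2


def measure_areas_um2(img_bgr, pixelsize_um, thresh=120, min_area_px=1000):
    gray = cv2.cvtColor(cv2.bitwise_not(img_bgr), cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # one pixel covers pixelsize_um x pixelsize_um micrometres
    return [
        cv2.contourArea(c) * pixelsize_um ** 2
        for c in contours
        if cv2.contourArea(c) > min_area_px
    ]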
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/775/69775643.ipynb
null
null
[{"Id": 69775643, "ScriptId": 19061570, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5743120, "CreationDate": "08/03/2021 07:19:44", "VersionNumber": 1.0, "Title": "Benchmark", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 195.0, "LinesInsertedFromPrevious": 195.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,387
0
2,387
2,387
69775785
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns import random from xgboost import XGBRegressor from sklearn.metrics import mean_squared_error from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit from sklearn.preprocessing import MinMaxScaler, StandardScaler import optuna # Pandas setting to display more dataset rows and columns pd.set_option("display.max_rows", 150) pd.set_option("display.max_columns", 500) pd.set_option("display.max_colwidth", None) pd.set_option("display.float_format", lambda x: "%.5f" % x) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # ## **Data import** train = pd.read_csv( "/kaggle/input/tabular-playground-series-aug-2021/train.csv", low_memory=False ) # , nrows=10000) # train["date_time"] = pd.to_datetime(train["date_time"], format="%Y-%m-%d %H:%M:%S") test = pd.read_csv( "/kaggle/input/tabular-playground-series-aug-2021/test.csv", low_memory=False ) # test["date_time"] = pd.to_datetime(test["date_time"], format="%Y-%m-%d %H:%M:%S") train.info(memory_usage="deep") test.info(memory_usage="deep") train.head(10) # # **EDA** # Colors to be used for plots colors = [ "lightcoral", "sandybrown", "darkorange", "mediumseagreen", "lightseagreen", "cornflowerblue", "mediumpurple", "palevioletred", "lightskyblue", "sandybrown", "yellowgreen", "indianred", "lightsteelblue", "mediumorchid", "deepskyblue", ] fig, ax = plt.subplots(figsize=(5, 5)) pie = ax.pie( [len(train), len(test)], labels=["Train dataset", "Test dataset"], colors=["salmon", "teal"], textprops={"fontsize": 15}, autopct="%1.1f%%", ) ax.axis("equal") ax.set_title("Dataset length comparison", fontsize=18) fig.set_facecolor("white") plt.show() train.describe().T train.isna().sum().sum(), test.isna().sum().sum() # There are no missing value in the both datasets. # Lets check target distribution. train["loss"].value_counts() fig, ax = plt.subplots(figsize=(16, 8)) bars = ax.bar( train["loss"].value_counts().sort_index().index, train["loss"].value_counts().sort_index().values, color=colors, edgecolor="black", ) ax.set_title("Loss (target) distribution", fontsize=20, pad=15) ax.set_ylabel("Amount of values", fontsize=14, labelpad=15) ax.set_xlabel("Loss (target) value", fontsize=14, labelpad=10) ax.bar_label( bars, [ f"{x:2.2f}%" for x in train["loss"].value_counts().sort_index().values / (len(train) / 100) ], padding=5, fontsize=10, rotation=90, ) ax.margins(0.025, 0.12) ax.grid(axis="y") plt.show() # Lets check feature values distribution in the both datasets. 
df = pd.concat([train.drop(["id", "loss"], axis=1), test.drop("id", axis=1)], axis=0) columns = df.columns.values cols = 3 rows = len(columns) // cols + 1 fig, axs = plt.subplots(ncols=cols, nrows=rows, figsize=(16, 100), sharex=False) plt.subplots_adjust(hspace=0.3) i = 0 for r in np.arange(0, rows, 1): for c in np.arange(0, cols, 1): if i >= len(columns): axs[r, c].set_visible(False) else: hist1 = axs[r, c].hist( train[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color="deepskyblue", edgecolor="black", alpha=0.7, label="Train Dataset", ) hist2 = axs[r, c].hist( test[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color="palevioletred", edgecolor="black", alpha=0.7, label="Test Dataset", ) axs[r, c].set_title(columns[i], fontsize=14, pad=5) axs[r, c].tick_params(axis="y", labelsize=13) axs[r, c].tick_params(axis="x", labelsize=13) axs[r, c].grid(axis="y") axs[r, c].legend(fontsize=13) i += 1 # plt.suptitle("Feature values distribution in both datasets", y=0.99) plt.show() # The datasets are pretty well balanced. train.nunique().sort_values().head() # As you can see, f1 feature has the smallest amount of unique values - 289. So I don't think any feature should be treated as categorical. # Lets look at feature correlation. # Plot dataframe df = train.drop("id", axis=1).corr().round(5) # Mask to hide upper-right part of plot as it is a duplicate mask = np.zeros_like(df) mask[np.triu_indices_from(mask)] = True # Making a plot plt.figure(figsize=(16, 16)) ax = sns.heatmap( df, annot=False, mask=mask, cmap="RdBu", annot_kws={"weight": "bold", "fontsize": 13}, ) ax.set_title("Feature correlation heatmap", fontsize=17) plt.setp( ax.get_xticklabels(), rotation=90, ha="right", rotation_mode="anchor", weight="normal", ) plt.setp( ax.get_yticklabels(), weight="normal", rotation_mode="anchor", rotation=0, ha="right", ) plt.show() # As you can see, the correlation is between ~0.03 and ~0.03 which is pretty small. So the features are weakly correlated. # There are some features with relatively low correlation with target value even comparing with other features: df[(df["loss"] > -0.001) & (df["loss"] < 0.001)]["loss"] # Lets visualize each feature vs loss. 
columns = train.drop(["id", "loss"], axis=1).columns.values cols = 4 rows = len(columns) // cols + 1 fig, axs = plt.subplots(ncols=cols, nrows=rows, figsize=(16, 100), sharex=False) plt.subplots_adjust(hspace=0.3) i = 0 for r in np.arange(0, rows, 1): for c in np.arange(0, cols, 1): if i >= len(columns): axs[r, c].set_visible(False) else: scatter = axs[r, c].scatter( train[columns[i]].values, train["loss"], color=random.choice(colors) ) axs[r, c].set_title(columns[i], fontsize=14, pad=5) axs[r, c].tick_params(axis="y", labelsize=11) axs[r, c].tick_params(axis="x", labelsize=11) i += 1 # plt.suptitle("Features vs loss", y=0.99) plt.show() # # **Data preparation** # Calculating edges of target bins to be used for stratified split target_bin_edges = np.histogram_bin_edges(train["loss"], bins=10) target_bin_edges[0] = -np.inf target_bin_edges[-1] = np.inf target_bins = pd.cut(train["loss"], target_bin_edges, labels=np.arange(10)) target_bins.value_counts() # Scaling data x_scaler = StandardScaler() X = pd.DataFrame( x_scaler.fit_transform(train.drop(["id", "loss"], axis=1)), columns=train.drop(["id", "loss"], axis=1).columns, ) X_test = pd.DataFrame( x_scaler.transform(test.drop("id", axis=1)), columns=test.drop(["id"], axis=1).columns, ) y = train["loss"].copy() X.describe() X_test.describe() y.min(), y.max() # # **Hyperparameters optimization** def train_model_optuna(trial, X_train, X_valid, y_train, y_valid): """ A function to train a model using different hyperparamerters combinations provided by Optuna. Loss of validation data predictions is returned to estimate hyperparameters effectiveness. """ preds = 0 # A set of hyperparameters to optimize by optuna xgb_params = { "n_estimators": trial.suggest_categorical("n_estimators", [10000]), "learning_rate": trial.suggest_float("learning_rate", 0.01, 0.8), "subsample": trial.suggest_float("subsample", 0.5, 1), "colsample_bytree": trial.suggest_float("colsample_bytree", 0.5, 1), "max_depth": trial.suggest_int("max_depth", 2, 16), "booster": trial.suggest_categorical("booster", ["gbtree"]), "tree_method": trial.suggest_categorical("tree_method", ["gpu_hist"]), "reg_lambda": trial.suggest_float("reg_lambda", 0.00001, 0.9), "reg_alpha": trial.suggest_float("reg_alpha", 0.00001, 0.9), "random_state": trial.suggest_categorical("random_state", [42]), "n_jobs": trial.suggest_categorical("n_jobs", [4]), } # Model loading and training model = XGBRegressor(**xgb_params) model.fit( X_train, y_train, eval_set=[(X_train, y_train), (X_valid, y_valid)], eval_metric="rmse", early_stopping_rounds=100, verbose=False, ) print(f"Number of boosting rounds: {model.best_iteration}") oof = model.predict(X_valid) oof[oof < 0] = 0 return np.sqrt(mean_squared_error(y_valid, oof)) # The code below is commented in order to save runtime. 
# %%time # # Splitting data into train and valid folds using target bins for stratification # split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) # for train_idx, valid_idx in split.split(X, target_bins): # X_train, X_valid = X.loc[train_idx], X.loc[valid_idx] # y_train, y_valid = y.loc[train_idx], y.loc[valid_idx] # # Setting optuna verbosity to show only warning messages # # If the line is uncommeted each iteration results will be shown # # optuna.logging.set_verbosity(optuna.logging.WARNING) # time_limit = 3600 * 8.5 # study = optuna.create_study(direction='minimize') # study.optimize(lambda trial: train_model_optuna(trial, X_train, X_valid, # y_train, y_valid), # n_trials = 100, # # timeout=time_limit # ) # # Showing optimization results # print('Number of finished trials:', len(study.trials)) # print('Best trial parameters:', study.best_trial.params) # print('Best score:', study.best_value) # # **Model training** # Hyperparameters optimized by Optuna xgb_params = { "n_estimators": 10000, "learning_rate": 0.025677082089199325, "subsample": 0.9074590897812683, "colsample_bytree": 0.9256262771216822, "max_depth": 5, "booster": "gbtree", "tree_method": "gpu_hist", "reg_lambda": 0.516372194087482, "reg_alpha": 0.43295480301318884, "random_state": 42, "n_jobs": 4, } splits = 10 skf = StratifiedKFold(n_splits=splits, shuffle=True, random_state=42) oof_preds = np.zeros((X.shape[0],)) preds = 0 model_fi = 0 total_mean_rmse = 0 for num, (train_idx, valid_idx) in enumerate(skf.split(X, target_bins)): X_train, X_valid = X.loc[train_idx], X.loc[valid_idx] y_train, y_valid = y.loc[train_idx], y.loc[valid_idx] model = XGBRegressor(**xgb_params) model.fit( X_train, y_train, eval_set=[(X_train, y_train), (X_valid, y_valid)], eval_metric="rmse", early_stopping_rounds=100, verbose=False, ) preds += model.predict(X_test) / splits model_fi += model.feature_importances_ oof_preds[valid_idx] = model.predict(X_valid) oof_preds[oof_preds < 0] = 0 # fold_rmse = np.sqrt(mean_squared_error(y_scaler.inverse_transform(np.array(y_valid).reshape(-1,1)), y_scaler.inverse_transform(np.array(oof_preds[valid_idx]).reshape(-1,1)))) fold_rmse = np.sqrt(mean_squared_error(y_valid, oof_preds[valid_idx])) print(f"Fold {num} RMSE: {fold_rmse}") # print(f"Trees: {model.tree_count_}") total_mean_rmse += fold_rmse / splits print(f"\nOverall RMSE: {total_mean_rmse}") # ## **Feature importances** df = pd.DataFrame(columns=["Feature", "Importance"]) df["Feature"] = X.columns df["Importance"] = model_fi / model_fi.sum() df.sort_values("Importance", axis=0, ascending=False, inplace=True) x = np.arange(0, len(df["Feature"])) height = 0.4 fig, ax = plt.subplots(figsize=(16, 30)) bars1 = ax.barh( x, df["Importance"], height=height, color="mediumorchid", edgecolor="black" ) ax.set_title("Feature importances", fontsize=30, pad=15) ax.set_ylabel("Feature names", fontsize=20, labelpad=15) ax.set_xlabel("Feature importance", fontsize=20, labelpad=15) ax.set_yticks(x) ax.set_yticklabels(df["Feature"], fontsize=15) ax.tick_params(axis="x", labelsize=15) ax.grid(axis="x") ax2 = ax.secondary_xaxis("top") ax2.set_xlabel("Feature importance", fontsize=20, labelpad=15) ax2.tick_params(axis="x", labelsize=15) plt.margins(0.04, 0.01) plt.gca().invert_yaxis() # ## **Submission** predictions = pd.DataFrame() predictions["id"] = test["id"] predictions["loss"] = preds predictions.to_csv("submission.csv", index=False, header=predictions.columns) predictions.head()
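# The cross-validation above stratifies a continuous target by binning it first.
# The sketch below isolates that idea on its own; the function name and defaults
# are assumptions, not the exact code used above.
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold


def stratified_regression_folds(y, n_bins=10, n_splits=10, seed=42):
    edges = np.histogram_bin_edges(y, bins=n_bins)
    edges[0], edges[-1] = -np.inf, np.inf  # catch values on the outer edges
    bins = pd.cut(y, edges, labels=False)  # integer bin label per row
    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    return list(skf.split(np.zeros(len(y)), bins))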
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/775/69775785.ipynb
null
null
[{"Id": 69775785, "ScriptId": 19068521, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6788322, "CreationDate": "08/03/2021 07:20:43", "VersionNumber": 1.0, "Title": "TPS-08-21 XGBoost", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 356.0, "LinesInsertedFromPrevious": 356.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
4,332
0
4,332
4,332
69775646
# Tabular Playground Series - Aug 2021 # Table of Contents # - [Introduction](#Introduction) # - [Libraries](#Import-Libraries) # - [Read and Understand Data](#Read-and-Understand-Data) # - [Exploratory Data Analysis](#Exploratory-Data-Analysis) # - [Data Preparation](#Data-Preparation) # - [Model Building Logistic Regression](#Model-Building-Logistic-Regression) # - [HyperParameter Tuning](#Hyperparameter-Tuning) # - [Conclusion](#Conclusion) # - [Business Recommendations & Insights](#Business-Recommendations-&-Insights) # # Introduction # Kaggle competitions are incredibly fun and rewarding, but they can also be intimidating for people who are relatively new in their data science journey. In the past, Kaggle has launched many Playground competitions that are more approachable than Featured competitions, and thus more beginner-friendly. # The goal of these competitions is to provide a fun, but less challenging, tabular dataset. These competitions are great for people looking for something in between the Titanic Getting Started competition and a Featured competition. # The dataset used for this competition is synthetic, but based on a real dataset and generated using a CTGAN. The original dataset deals with calculating the loss associated with loan defaults. Although the features are anonymized, they have properties relating to real-world features. # **Task: Calculating the loss associated with loan defaults. The features are anonymized, but they have properties relating to real-world features.** # **What is a loan default?** # A default is a failure to repay a debt/loan on time. It can occur when a borrower fails to make timely payments on loans such as mortgages, bank loans, car leases, etc. # **Metric** # Submissions are scored on the root mean squared error. import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g.
pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directo import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # Import Libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import OneHotEncoder from sklearn.metrics import * from sklearn.pipeline import Pipeline, make_pipeline from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score from sklearn.preprocessing import StandardScaler, LabelEncoder from sklearn.model_selection import RandomizedSearchCV from sklearn import model_selection import lightgbm as lgbm import xgboost as xgb from lightgbm import LGBMRegressor from catboost import CatBoostRegressor from sklearn.ensemble import ( AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor, StackingRegressor, ) import warnings warnings.filterwarnings("ignore") # To supress warnings # # Read and Understand Data df_train = pd.read_csv("../input/tabular-playground-series-aug-2021/train.csv") df_test = pd.read_csv("../input/tabular-playground-series-aug-2021/test.csv") df_train.head() df_test.head() print( "#" * 40, "\nTrain", ) print(f"There are {df_train.shape[0]} rows and {df_train.shape[1]} columns") # fstring missing_df = pd.DataFrame( { "Missing": df_train.isnull().sum(), "Missing %": round( (df_train.isnull().sum() / df_train.isna().count() * 100), 2 ), } ) display(missing_df.sort_values(by="Missing", ascending=False)) print( "#" * 40, "\nTest", ) print(f"There are {df_test.shape[0]} rows and {df_test.shape[1]} columns") # fstring missing_df = pd.DataFrame( { "Missing": df_test.isnull().sum(), "Missing %": round((df_test.isnull().sum() / df_test.isna().count() * 100), 2), } ) display(missing_df.sort_values(by="Missing", ascending=False)) #### Check the data types of the columns for the dataset. df_train.info() intfeatures = df_train.select_dtypes(include="int64") intfeatures.columns #### Check the data types of the columns for the dataset. df_test.info() # Observation # - Training set has 250K observations with 102 features . # - Testing set has 150K observations with 101 features # - `Loss` column is the target variable which is only available in the train dataset. 
# - There are no missing values in both sets # - In train dataset, 95 features are float64 and 7('id', 'f1', 'f16', 'f27', 'f55', 'f86', 'loss') are of int64 type # Exploratory Data Analysis df_train.drop(columns=["id"]).describe().T.style.bar(subset=["mean"], color="#606ff2") def dist_box(data): # function plots a combined graph for univariate analysis of continous variable # to check spread, central tendency , dispersion and outliers Name = data.name.upper() fig, (ax_box, ax_dis) = plt.subplots( nrows=2, sharex=True, gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=(8, 5), ) mean = data.mean() median = data.median() mode = data.mode().tolist()[0] sns.set_theme(style="white") fig.suptitle("SPREAD OF DATA FOR " + Name, fontsize=18, fontweight="bold") sns.boxplot(x=data, showmeans=True, orient="h", color="Blue", ax=ax_box) ax_box.set(xlabel="") # just trying to make visualisation better. This will set background to white sns.despine(top=True, right=True, left=True) # to remove side line from graph sns.distplot(data, kde=False, color="#e218ed", ax=ax_dis) ax_dis.axvline(mean, color="r", linestyle="--", linewidth=2) ax_dis.axvline(median, color="g", linestyle="-", linewidth=2) plt.legend({"Mean": mean, "Median": median}) # select all quantitative columns for checking the spread list_col = df_train.select_dtypes(include="number").columns.to_list() for i in range(len(list_col)): dist_box(df_train[list_col[i]]) # - Most of the features are right skewed include loss feature # fig1, axes1 = plt.subplots(20, 5, figsize=(14, 19)) for i in range(len(list_col)): row = i // 2 col = i % 2 ax = axes1[row, col] sns.boxplot(df_train[list_col[i]], df_train["loss"], ax=ax).set( title=list_col[i].upper() ) corr = df_train.corr() mask = np.triu(np.ones_like(corr, dtype=bool)) plt.figure(figsize=(15, 15)) plt.title("Correlation matrix for Train data") sns.heatmap(corr, mask=mask, linewidths=0.5) plt.show() # # Data Preparation X = df_train.drop(["id", "loss"], axis=1).values y = df_train["loss"].values scaler = StandardScaler() X = scaler.fit_transform(X)
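# The notebook stops after scaling the full training matrix. One hedged note on that
# step: fitting the scaler inside a pipeline keeps each cross-validation fold free of
# information from its validation split. The model below is only a placeholder to make
# the sketch runnable, not the model this notebook intends to build.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score

X_raw = df_train.drop(["id", "loss"], axis=1)
y_raw = df_train["loss"]
pipe = make_pipeline(StandardScaler(), Ridge(alpha=1.0))
rmse = -cross_val_score(
    pipe, X_raw, y_raw, scoring="neg_root_mean_squared_error", cv=5
).mean()
print(f"Baseline CV RMSE: {rmse:.4f}")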
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/775/69775646.ipynb
null
null
[{"Id": 69775646, "ScriptId": 19069134, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6614570, "CreationDate": "08/03/2021 07:19:46", "VersionNumber": 3.0, "Title": "TPS_AUG_EDA_Model(in Progress)", "EvaluationDate": "08/03/2021", "IsChange": false, "TotalLines": 199.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 199.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,042
0
2,042
2,042
69775892
from learntools.core import binder binder.bind(globals()) from learntools.python.ex5 import * print("Setup complete.") # # 1. # Have you ever felt debugging involved a bit of luck? The following program has a bug. Try to identify the bug and fix it. def has_lucky_number(nums): """Return whether the given list of numbers is lucky. A lucky list contains at least one number divisible by 7. """ for num in nums: if num % 7 == 0: return True else: return False # Try to identify the bug and fix it in the cell below: def has_lucky_number(nums): """Return whether the given list of numbers is lucky. A lucky list contains at least one number divisible by 7. """ for num in nums: if num % 7 == 0: return True return False # Check your answer q1.check() q1.hint() # q1.solution() # # 2. # Look at the Python expression below. What do you think we'll get when we run it? When you've made your prediction, uncomment the code and run the cell to see if you were right. # [1, 2, 3, 4] > 2 # R and Python have some libraries (like numpy and pandas) compare each element of the list to 2 (i.e. do an 'element-wise' comparison) and give us a list of booleans like `[False, False, True, True]`. # Implement a function that reproduces this behaviour, returning a list of booleans corresponding to whether the corresponding element is greater than n. def elementwise_greater_than(L, thresh): """Return a list with the same length as L, where the value at index i is True if L[i] is greater than thresh, and False otherwise. >>> elementwise_greater_than([1, 2, 3, 4], 2) [False, False, True, True] """ return [i > thresh for i in L] # Check your answer q2.check() # q2.solution() # # 3. # Complete the body of the function below according to its docstring. def menu_is_boring(meals): """Given a list of meals served over some period of time, return True if the same meal has ever been served two days in a row, and False otherwise. """ for i in range(len(meals) - 1): if meals[i] == meals[i + 1]: return True return False # Check your answer q3.check() q3.hint() # q3.solution() # # 4. 🌶️ # Next to the Blackjack table, the Python Challenge Casino has a slot machine. You can get a result from the slot machine by calling `play_slot_machine()`. The number it returns is your winnings in dollars. Usually it returns 0. But sometimes you'll get lucky and get a big payday. Try running it below: play_slot_machine() # By the way, did we mention that each play costs $1? Don't worry, we'll send you the bill later. # On average, how much money can you expect to gain (or lose) every time you play the machine? The casino keeps it a secret, but you can estimate the average value of each pull using a technique called the **Monte Carlo method**. To estimate the average outcome, we simulate the scenario many times, and return the average result. # Complete the following function to calculate the average value per play of the slot machine. def estimate_average_slot_payout(n_runs): """Run the slot machine n_runs times and return the average net profit per run. Example calls (note that return value is nondeterministic!): >>> estimate_average_slot_payout(1) -1 >>> estimate_average_slot_payout(1) 0.5 """ return sum([play_slot_machine() for i in range(n_runs)]) / n_runs q4.check() # When you think you know the expected value per spin, run the code cell below to view the solution and get credit for answering the question. # Check your answer (Run this code cell to receive credit!) q4.solution()
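# Question 4 leans on the Monte Carlo idea: average many simulated runs to
# approximate an expected value. The illustration below applies it to a case with a
# known answer (a fair six-sided die, expected value 3.5) so the estimate can be
# checked; `play_slot_machine` itself is supplied by the exercise environment and is
# not reimplemented here.
import random


def estimate_die_mean(n_runs):
    return sum(random.randint(1, 6) for _ in range(n_runs)) / n_runs


for n in (100, 10_000, 1_000_000):
    print(n, estimate_die_mean(n))  # estimates approach 3.5 as n grows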
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/775/69775892.ipynb
null
null
[{"Id": 69775892, "ScriptId": 19068889, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5137590, "CreationDate": "08/03/2021 07:21:26", "VersionNumber": 1.0, "Title": "Exercise: Loops and List Comprehensions", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 126.0, "LinesInsertedFromPrevious": 12.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 114.0, "LinesInsertedFromFork": 12.0, "LinesDeletedFromFork": 7.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 114.0, "TotalVotes": 0}]
null
null
null
null
false
0
1,019
0
1,019
1,019
69775079
import numpy as np import pandas as pd import torch import transformers device = torch.device("cuda" if torch.cuda.is_available() else "cpu") torch.manual_seed(42) train = pd.read_csv("../input/commonlitreadabilityprize/train.csv") test = pd.read_csv("../input/commonlitreadabilityprize/test.csv") train.head() import matplotlib.pyplot as plt fig = plt.figure(figsize=(20, 4)) plt.subplot(221) train["target"].hist() plt.subplot(222) plt.scatter(train["target"], train["standard_error"]) plt.subplot(223) plt.plot(train["target"]) plt.show() # train['target'].hist(),train['target'].hist() import textstat ri = [textstat.textstat.automated_readability_index(i) for i in train["excerpt"].values] rf = [textstat.textstat.flesch_reading_ease(i) for i in train["excerpt"].values] rd = [ textstat.textstat.dale_chall_readability_score_v2(i) for i in train["excerpt"].values ] fig = plt.figure(figsize=(20, 4)) plt.subplot(131) plt.scatter(ri, train["target"]) plt.subplot(132) plt.scatter(rf, train["target"]) plt.subplot(133) plt.scatter(rd, train["target"]) plt.show() # # Tokenization from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("roberta-base") # # Dataset from torch.utils.data import Dataset, DataLoader class TextData(Dataset): def __init__(self, text, labels, max_len=250): self.text = text self.labels = labels self.tokenizer = tokenizer self.max_len = max_len def __len__(self): return len(self.text) def __getitem__(self, item): tokenized_text = tokenizer( self.text[item].replace("\n", ""), max_length=self.max_len, truncation=True, return_attention_mask=True, return_token_type_ids=True, ) padding_length = self.max_len - len(tokenized_text["input_ids"]) return { "input_ids": torch.tensor( tokenized_text["input_ids"] + ([0] * padding_length), dtype=torch.long ), #'token_type_ids':torch.tensor(tokenized_text['token_type_ids'] + ([0] * padding_length), dtype=torch.long), "attention_mask": torch.tensor( tokenized_text["attention_mask"] + ([0] * padding_length), dtype=torch.long, ), "label": torch.tensor(self.labels[item], dtype=torch.double), } torch.initial_seed() from torch.utils.data.dataset import random_split torch.manual_seed(1024) dataset = TextData(train["excerpt"].values, train["target"].values) train_dataset, valid_dataset = random_split(dataset, [2000, 834]) loaders = { "train": DataLoader(train_dataset, shuffle=True, batch_size=16), "valid": DataLoader(valid_dataset, batch_size=16), } # # Model from transformers import AutoModel class ReadModel(torch.nn.Module): def __init__(self): super(ReadModel, self).__init__() self.bert = AutoModel.from_pretrained( "roberta-base", output_hidden_states=False ) self.dropout = torch.nn.Dropout(0.2) self.hidden = net = torch.nn.Sequential( torch.nn.Linear(768, 384), torch.nn.LeakyReLU(), torch.nn.Dropout(0.2), torch.nn.Linear(384, 128), torch.nn.LeakyReLU(), ) self.regressor = torch.nn.Linear(128, 1) def forward(self, input_ids, attention_mask): output = self.bert(input_ids, attention_mask) output = output.last_hidden_state[:, 0] output = self.dropout(output) output = self.hidden(output) logits = self.regressor(output) return logits model = ReadModel() for param in model.bert.embeddings.parameters(): param.requires_grad = False for i in range(0, 10): for param in model.bert.encoder.layer[i].parameters(): param.requires_grad = False # # Runner import catalyst from catalyst import dl, metrics, utils catalyst.__version__ import torch from torch.nn import functional as F class CustomRunner(dl.Runner): def predict_batch(self, batch): 
input_ids = batch["input_ids"].T.to(self.device) # token_type_ids = batch['token_type_ids'].to(self.device) attention_mask = batch["attention_mask"].T.to(self.device) return self.model(input_ids, attention_mask) # , token_type_ids def on_loader_start(self, runner): super().on_loader_start(runner) self.meters = { key: metrics.AdditiveValueMetric(compute_on_call=False) for key in ["loss", "mae"] } def handle_batch(self, batch): input_ids = batch["input_ids"] # token_type_ids = batch['token_type_ids'] attention_mask = batch["attention_mask"] y = batch["label"].view(-1, 1).float() y_pred = ( self.model(input_ids, attention_mask).view(-1, 1).float() ) # , token_type_ids self.batch = {"logits": y_pred, "target": y} loss = F.mse_loss(y_pred.view(-1), y.view(-1)) self.batch_metrics.update({"loss": loss**0.5, "mae": F.l1_loss(y_pred, y)}) for key in ["loss", "mae"]: self.meters[key].update(self.batch_metrics[key].item(), self.batch_size) if self.is_train_loader: loss.backward(retain_graph=True) self.optimizer.step() self.optimizer.zero_grad() def on_loader_end(self, runner): for key in ["loss", "mae"]: self.loader_metrics[key] = self.meters[key].compute()[0] super().on_loader_end(runner) # # Train criterion = torch.nn.MSELoss() # optimizer = torch.optim.AdamW(model.parameters(), lr=0.00001) optimizer = torch.optim.AdamW( [ {"params": model.bert.parameters(), "lr": 0.00001}, {"params": model.hidden.parameters(), "lr": 0.001}, {"params": model.regressor.parameters(), "lr": 0.0001}, ] ) runner = CustomRunner() runner.train( model=model, optimizer=optimizer, loaders=loaders, logdir="logs", valid_loader="valid", valid_metric="loss", num_epochs=13, minimize_valid_metric=True, verbose=True, timeit=False, ) torch.save(runner.model.state_dict(), "n2_model.pth") # qmodel = utils.quantize_model(model=runner.model) # torch.save(qmodel.state_dict(), "q1_model.pth") # # Evaluate test_params = {"batch_size": 128 * 4, "shuffle": False, "drop_last": False} f = TextData(test["excerpt"].values, np.zeros(len(test))) f_generator = DataLoader(f, **test_params) def predict(model, data_loader): model.eval() result = np.zeros(len(data_loader.dataset)) index = 0 with torch.no_grad(): for item in data_loader: input_ids = item["input_ids"].cuda() attention_mask = item["attention_mask"].cuda() pred = model(input_ids, attention_mask) result[index : index + pred.shape[0]] = pred.flatten().cpu() index += pred.shape[0] return result predict(runner.model, f_generator) pre = pd.DataFrame( {"id": test.loc[:, "id"].values, "target": predict(runner.model, f_generator)} ) pre.to_csv("submission.csv", index=False)
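# Side note (sketch, not part of the original notebook): the manual zero-padding inside the Dataset above can
# usually be delegated to the tokenizer via padding="max_length". One difference worth knowing: the tokenizer
# pads with the model's own pad token (id 1 for RoBERTa) rather than 0, although the attention mask hides the
# padded positions either way.
from transformers import AutoTokenizer
import torch

_tok = AutoTokenizer.from_pretrained("roberta-base")


def encode_excerpt(text, max_len=250):
    enc = _tok(
        text.replace("\n", ""),
        max_length=max_len,
        truncation=True,
        padding="max_length",  # pads input_ids and attention_mask up to max_len
        return_attention_mask=True,
    )
    return {
        "input_ids": torch.tensor(enc["input_ids"], dtype=torch.long),
        "attention_mask": torch.tensor(enc["attention_mask"], dtype=torch.long),
    }


# example = encode_excerpt("A short excerpt of text.")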
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/775/69775079.ipynb
null
null
[{"Id": 69775079, "ScriptId": 18852803, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7367492, "CreationDate": "08/03/2021 07:15:45", "VersionNumber": 5.0, "Title": "CommonLit Readability lab2", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 259.0, "LinesInsertedFromPrevious": 7.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 252.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,177
0
2,177
2,177
69678816
<jupyter_start><jupyter_text>Hurricanes and Typhoons, 1851-2014 # Context The National Hurricane Center (NHC) conducts a post-storm analysis of each tropical cyclone in the Atlantic basin (i.e., North Atlantic Ocean, Gulf of Mexico, and Caribbean Sea) and and the North Pacific Ocean to determine the official assessment of the cyclone's history. This analysis makes use of all available observations, including those that may not have been available in real time. In addition, NHC conducts ongoing reviews of any retrospective tropical cyclone analyses brought to its attention and on a regular basis updates the historical record to reflect changes introduced. # Content The NHC publishes the tropical cyclone historical database in a format known as HURDAT, short for HURricane DATabase. These databases (Atlantic HURDAT2 and NE/NC Pacific HURDAT2) contain six-hourly information on the location, maximum winds, central pressure, and (starting in 2004) size of all known tropical cyclones and subtropical cyclones. Kaggle dataset identifier: hurricane-database <jupyter_script># # Analysis of caribbean hurricanes # Inspired by https://arxiv.org/abs/1802.02548 i also wanted to try to predict the tracks of hurricanes based on data of past storms. # I found this similar looking dataset and will try to do similar predictions but experiment with different methods (and not start with RNN's). import numpy as np import pandas as pd import geopandas import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.simplefilter("ignore") plt.style.use("bmh") df = pd.read_csv("/kaggle/input/hurricane-database/atlantic.csv") df["Time"] = df["Time"].astype("object") time_replace = [str(x) for x in df["Time"].unique()] for i, txt in enumerate(time_replace): time_replace[i] = txt.rjust(4, "0") time_replace[i] = f"{time_replace[i][0:2]}:{time_replace[i][2:4]}:00" for old, new in zip(df["Time"].unique(), time_replace): df.loc[df["Time"] == old, "Time"] = new # df["Time"].unique() df["Date"] = df["Date"].astype("object") for i, date_str in enumerate(df["Date"].unique()): df.loc[ df["Date"] == date_str, "Date" ] = f"{str(date_str)[0:4]}-{str(date_str)[4:6]}-{str(date_str)[6:]}" df["Datetime"] = df["Date"] + " " + df["Time"] df["Datetime"] = pd.to_datetime(df["Datetime"]) df.drop(columns=["Date", "Time"], inplace=True) df.sort_values(by=["Datetime"], inplace=True) df.head() df.tail()
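# Sketch of a vectorised alternative (not from the original notebook) to the per-value replacement loops above.
# It assumes, as the padding logic above implies, that `Time` is stored as an integer such as 600 or 1800 and
# `Date` as an integer in YYYYMMDD form.
import pandas as pd


def add_datetime(frame):
    hhmm = frame["Time"].astype(int).astype(str).str.zfill(4)  # 600 -> "0600"
    ymd = frame["Date"].astype(str)  # 18510625 -> "18510625"
    frame = frame.copy()
    frame["Datetime"] = pd.to_datetime(ymd + hhmm, format="%Y%m%d%H%M")
    return frame.drop(columns=["Date", "Time"]).sort_values("Datetime")


# df = add_datetime(pd.read_csv("/kaggle/input/hurricane-database/atlantic.csv"))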
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/678/69678816.ipynb
hurricane-database
null
[{"Id": 69678816, "ScriptId": 19012947, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1282712, "CreationDate": "08/02/2021 18:06:15", "VersionNumber": 3.0, "Title": "Analysis of caribbean hurricanes", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 38.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 34.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 93148162, "KernelVersionId": 69678816, "SourceDatasetVersionId": 1307}]
[{"Id": 1307, "DatasetId": 692, "DatasourceVersionId": 1307, "CreatorUserId": 797864, "LicenseName": "CC0: Public Domain", "CreationDate": "01/20/2017 18:15:43", "VersionNumber": 1.0, "Title": "Hurricanes and Typhoons, 1851-2014", "Slug": "hurricane-database", "Subtitle": "Location, wind, and pressure of tropical cyclones in Atlantic and Pacific Oceans", "Description": "# Context \n\nThe National Hurricane Center (NHC) conducts a post-storm analysis of each tropical cyclone in the Atlantic\nbasin (i.e., North Atlantic Ocean, Gulf of Mexico, and Caribbean Sea) and and the North Pacific Ocean to determine the official assessment of the cyclone's history. This analysis makes use of all available observations, including those that may not have been available in real time. In addition, NHC conducts ongoing reviews of any retrospective tropical cyclone analyses brought to its attention and on a regular basis updates the historical record to reflect\nchanges introduced.\n\n\n# Content\n\nThe NHC publishes the tropical cyclone historical database in a format known as HURDAT, short for HURricane DATabase. These databases (Atlantic HURDAT2 and NE/NC Pacific HURDAT2) contain six-hourly information on the location, maximum winds, central pressure, and (starting in 2004) size of all known tropical cyclones and subtropical cyclones.", "VersionNotes": "Initial release", "TotalCompressedBytes": 9531618.0, "TotalUncompressedBytes": 9531618.0}]
[{"Id": 692, "CreatorUserId": 797864, "OwnerUserId": NaN, "OwnerOrganizationId": 22.0, "CurrentDatasetVersionId": 1307.0, "CurrentDatasourceVersionId": 1307.0, "ForumId": 2421, "Type": 2, "CreationDate": "01/20/2017 18:15:43", "LastActivityDate": "02/04/2018", "TotalViews": 66975, "TotalDownloads": 9400, "TotalVotes": 158, "TotalKernels": 25}]
null
false
0
462
0
750
462
69678325
<jupyter_start><jupyter_text>Clothing dataset (full, high resolution) ## Clothing dataset Over 5,000 images of 20 different classes. This dataset can be freely used for any purpose, including commercial: For example: * Creating a tutorial or a course (free or paid) * Writing a book * Kaggle competitions (as an external dataset) * Training an internal model at any company You can read more about this dataset here: https://medium.com/data-science-insider/clothing-dataset-5b72cd7c3f1f You can also get this data from GitHub: https://github.com/alexeygrigorev/clothing-dataset Kaggle dataset identifier: clothing-dataset-full <jupyter_script># # Clothes Classification import pandas as pd import tensorflow as tf from tensorflow import keras tf.__version__ DATA_DIR = "../input/clothing-dataset-full/images_original/" DATA_RAW = "../input/clothing-dataset-full/images.csv" MODEL_PATH = "../model.h5" WIDTH = 150 HEIGHT = 150 NUM_CHANNELS = 3 VALIDATION_SPLIT = 0.2 BATCH_SIZE = 128 LEARNING_RATE = 1e-4 EPOCHS = 10 # ## Prepare Data data = pd.read_csv(DATA_RAW) data = data.sample(len(data)) data.head(10) data["image_path"] = DATA_DIR + data["image"] + ".jpg" data.head() generator = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, validation_split=VALIDATION_SPLIT ) train_set = generator.flow_from_dataframe( dataframe=data, x_col="image_path", y_col="label", target_size=(WIDTH, HEIGHT), batch_size=BATCH_SIZE, class_mode="categorical", subset="training", shuffle=True, ) val_set = generator.flow_from_dataframe( dataframe=data, x_col="image_path", y_col="label", target_size=(WIDTH, HEIGHT), batch_size=BATCH_SIZE, class_mode="categorical", subset="validation", shuffle=False, ) num_classes = len(train_set.class_indices) print("Total classes:", num_classes) # ## Design model model = keras.Sequential( [ keras.Input(shape=(WIDTH, HEIGHT, NUM_CHANNELS)), keras.layers.Conv2D(128, (5, 5), activation=tf.nn.relu), keras.layers.Conv2D(128, (5, 5), activation=tf.nn.relu), keras.layers.MaxPooling2D(2, 2), keras.layers.Conv2D(64, (5, 5), activation=tf.nn.relu), keras.layers.Conv2D(64, (5, 5), activation=tf.nn.relu), keras.layers.MaxPooling2D(2, 2), keras.layers.Conv2D(32, (5, 5), activation=tf.nn.relu), keras.layers.Conv2D(32, (5, 5), activation=tf.nn.relu), keras.layers.MaxPooling2D(2, 2), keras.layers.Flatten(), keras.layers.Dense(128, activation=tf.nn.relu), keras.layers.Dense(num_classes, activation=tf.nn.softmax), ] ) model.summary() # ## Train model model.compile( loss="categorical_crossentropy", optimizer=keras.optimizers.RMSprop(lr=LEARNING_RATE), metrics=["accuracy"], ) _ = model.fit(train_set, epochs=EPOCHS) # ## Evaluate model _ = model.evaluate(val_set) # ## Save model model.save(MODEL_PATH)
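# Follow-up sketch (not in the original notebook): once training finishes, a single image can be classified and
# the argmax mapped back to a label through the generator's class_indices. The image path below is a
# placeholder; `model`, `train_set`, `WIDTH` and `HEIGHT` come from the cells above.
import numpy as np
import tensorflow as tf


def predict_image(model, class_indices, path, width=WIDTH, height=HEIGHT):
    img = tf.keras.preprocessing.image.load_img(path, target_size=(width, height))
    x = tf.keras.preprocessing.image.img_to_array(img) / 255.0  # same rescale as the generator
    probs = model.predict(x[np.newaxis, ...])[0]
    idx_to_label = {v: k for k, v in class_indices.items()}
    return idx_to_label[int(np.argmax(probs))], float(np.max(probs))


# label, confidence = predict_image(model, train_set.class_indices, "some_image.jpg")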
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/678/69678325.ipynb
clothing-dataset-full
agrigorev
[{"Id": 69678325, "ScriptId": 19023244, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3372904, "CreationDate": "08/02/2021 18:02:24", "VersionNumber": 1.0, "Title": "02. Classify Clothes", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 89.0, "LinesInsertedFromPrevious": 89.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 93147539, "KernelVersionId": 69678325, "SourceDatasetVersionId": 1572891}]
[{"Id": 1572891, "DatasetId": 929774, "DatasourceVersionId": 1608011, "CreatorUserId": 107207, "LicenseName": "CC0: Public Domain", "CreationDate": "10/19/2020 22:16:31", "VersionNumber": 1.0, "Title": "Clothing dataset (full, high resolution)", "Slug": "clothing-dataset-full", "Subtitle": "5,000 images of clothes released under CC0", "Description": "## Clothing dataset\n\nOver 5,000 images of 20 different classes.\n\nThis dataset can be freely used for any purpose, including commercial:\n\nFor example:\n* Creating a tutorial or a course (free or paid)\n* Writing a book\n* Kaggle competitions (as an external dataset)\n* Training an internal model at any company\n\nYou can read more about this dataset here: https://medium.com/data-science-insider/clothing-dataset-5b72cd7c3f1f\n\nYou can also get this data from GitHub: https://github.com/alexeygrigorev/clothing-dataset\n\n### Acknowledgements \n\nWe'd like to thank\n\n* Kenes Shangereyev and Tagias.com for helping with 3000 images\n* All the 32 people who contributed their images to the dataset via the forms\n* Everyone who supported the initiative by engaging with the announcements on social media \n\nIt wouldn't be possible to collect this dataset without your help!", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 929774, "CreatorUserId": 107207, "OwnerUserId": 107207.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1572891.0, "CurrentDatasourceVersionId": 1608011.0, "ForumId": 945730, "Type": 2, "CreationDate": "10/19/2020 22:16:31", "LastActivityDate": "10/19/2020", "TotalViews": 64487, "TotalDownloads": 6111, "TotalVotes": 117, "TotalKernels": 22}]
[{"Id": 107207, "UserName": "agrigorev", "DisplayName": "ololo", "RegisterDate": "06/10/2013", "PerformanceTier": 3}]
false
0
780
0
957
780
69678179
# # Understat Series : Heatmaps # **By Jose Gonzalez** # **Was greatly inspired by the [This FC Python Tutorial](https://fcpython.com/visualisation/football-heatmaps-seaborn) They have a great webape explaining basics , shotmaps, passing networks, dashboards etc , [Check out their website](https://fcpython.com/)** # ![image.png](attachment:image.png) # # Introduction # Although most of the soccer heatmaps are for passes and tackles and movement, I think it's interesting the do a quick analysis of heatmaps for shots. Based on the tutorial above from FC Python , I've decided to explore this topic using Understat's data. # # Notebook content # * [1. Introduction](#1.) # - [1.1. Notebook Content.](#1.1) # * [2.Importing libraries.](#2.) # * [3.Scraping shots](#3.) # - [3.1. Scraping match IDs.](#3.1) # - [3.2. Understat for loops.](#3.2) # * [4. Filtering FC Barcelona shots](#4.) # * [5. Draw pitch](#5.) # * [6. Plotting the Heatmaps](#6.) # - [6.1. Free kicks vs Corners.](#6.1) # - [6.2. Messi Goals vs Messi Shots](#6.2) # - [6.3. Pedri vs Dembele shots](#6.3) # - [6.4. All shots overlaid](#6.4) # * [7. References](#7.) # # Importing Libraries import requests from bs4 import BeautifulSoup import json from tqdm import tqdm import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from highlight_text import fig_text from matplotlib.patches import Arc pd.set_option("display.max_rows", 500) pd.set_option("display.max_columns", 500) import re # # # Scraping shots # In order to scrape all shots taken by each player from la liga, we must first scrape their IDs and change their data type from obj to int and then run a loop with those values to scrape all the shots the players have taken throughout their careers. # ## Scraping the match IDs # Entering the league's link link = "https://understat.com/league/La_liga" res = requests.get(link) soup = BeautifulSoup(res.content, "lxml") scripts = soup.find_all("script") # Get the players stats strings = scripts[3].string # Getting rid of unnecessary characters from json data ind_start = strings.index("('") + 2 ind_end = strings.index("')") json_data = strings[ind_start:ind_end] json_data = json_data.encode("utf8").decode("unicode_escape") data = json.loads(json_data) # Creating the dataframe all_shots = pd.DataFrame(data) # Changing the data type using pd.to_numeric() function all_shots["id"] = pd.to_numeric(all_shots["id"]) idd = all_shots["id"].values.tolist() # # ## Understat for loops # Once the match IDs have scraped, let's run the loop , it'll take around 10 minutes : shoots = pd.DataFrame() for i in tqdm(idd): url = f"https://understat.com/player/{i}" r = requests.get(url) soup = BeautifulSoup(r.content, "lxml") scripts = soup.find_all("script") strings = scripts[3].string ind_start = strings.index("('") + 2 ind_end = strings.index("')") json_data = strings[ind_start:ind_end] json_data = json_data.encode("utf8").decode("unicode_escape") data = json.loads(json_data) shoots = shoots.append(pd.DataFrame(data)) # # # Filtering FC Barcelona shoots # Now that we've scraped every single shot in La Liga, let's focus on shots taken by FC Barcelona : barca_shoots = shoots[ (shoots["h_team"] == "Barcelona") | ((shoots["a_team"] == "Barcelona")) ] print(barca_shoots.shape) display(barca_shoots.head(9)) # We've got 4744 shots taken by Barcelona in the 2020/2021 Season , now let's change the data types of a few columns in order to manipulate them for further analysis and modify the field dimensions to adjust to our soccer pitch: # Changing data 
types barca_shoots["X"] = barca_shoots["X"].astype("float64") barca_shoots["Y"] = barca_shoots["Y"].astype("float64") # Adjustind dimensions for soccer pitch barca_shoots["X1"] = (barca_shoots["X"] / 100) * 105 * 100 barca_shoots["Y1"] = (barca_shoots["Y"] / 100) * 68 * 100 # # # Football Pitch # Now let's create the soccer pitch and once again it's all thanks again to [This FC Python Tutorial](https://fcpython.com/visualisation/drawing-pitchmap-adding-lines-circles-matplotlib) def football_pitch( x_min=0, x_max=105, y_min=0, y_max=68, pitch_color="#f0f0f0", line_color="black", line_thickness=1.5, point_size=20, orientation="horizontal", aspect="full", axis="off", ax=None, ): if not ax: raise TypeError( "This function is intended to be used with an existing fig and ax in order to allow flexibility in plotting of various sizes and in subplots." ) if orientation.lower().startswith("h"): first = 0 second = 1 arc_angle = 0 if aspect == "half": ax.set_xlim(x_max / 2, x_max + 5) elif orientation.lower().startswith("v"): first = 1 second = 0 arc_angle = 90 if aspect == "half": ax.set_ylim(x_max / 2, x_max + 5) else: raise NameError("You must choose one of horizontal or vertical") ax.axis(axis) rect = plt.Rectangle( (x_min, y_min), x_max, y_max, facecolor=pitch_color, edgecolor="none", zorder=-2 ) ax.add_artist(rect) x_conversion = x_max / 100 y_conversion = y_max / 100 pitch_x = [0, 5.8, 11.5, 17, 50, 83, 88.5, 94.2, 100] # x dimension markings pitch_x = [x * x_conversion for x in pitch_x] pitch_y = [0, 21.1, 36.6, 50, 63.2, 78.9, 100] # y dimension markings pitch_y = [x * y_conversion for x in pitch_y] goal_y = [45.2, 54.8] # goal posts goal_y = [x * y_conversion for x in goal_y] # side and goal lines lx1 = [x_min, x_max, x_max, x_min, x_min] ly1 = [y_min, y_min, y_max, y_max, y_min] # outer box lx2 = [x_max, pitch_x[5], pitch_x[5], x_max] ly2 = [pitch_y[1], pitch_y[1], pitch_y[5], pitch_y[5]] lx3 = [0, pitch_x[3], pitch_x[3], 0] ly3 = [pitch_y[1], pitch_y[1], pitch_y[5], pitch_y[5]] # goals lx4 = [x_max, x_max + 2, x_max + 2, x_max] ly4 = [goal_y[0], goal_y[0], goal_y[1], goal_y[1]] lx5 = [0, -2, -2, 0] ly5 = [goal_y[0], goal_y[0], goal_y[1], goal_y[1]] # 6 yard box lx6 = [x_max, pitch_x[7], pitch_x[7], x_max] ly6 = [pitch_y[2], pitch_y[2], pitch_y[4], pitch_y[4]] lx7 = [0, pitch_x[1], pitch_x[1], 0] ly7 = [pitch_y[2], pitch_y[2], pitch_y[4], pitch_y[4]] # Halfline, penalty spots, and kickoff spot lx8 = [pitch_x[4], pitch_x[4]] ly8 = [0, y_max] lines = [ [lx1, ly1], [lx2, ly2], [lx3, ly3], [lx4, ly4], [lx5, ly5], [lx6, ly6], [lx7, ly7], [lx8, ly8], ] points = [ [pitch_x[6], pitch_y[3]], [pitch_x[2], pitch_y[3]], [pitch_x[4], pitch_y[3]], ] circle_points = [pitch_x[4], pitch_y[3]] arc_points1 = [pitch_x[6], pitch_y[3]] arc_points2 = [pitch_x[2], pitch_y[3]] for line in lines: ax.plot( line[first], line[second], color=line_color, lw=line_thickness, zorder=-1 ) for point in points: ax.scatter( point[first], point[second], color=line_color, s=point_size, zorder=-1 ) circle = plt.Circle( (circle_points[first], circle_points[second]), x_max * 0.088, lw=line_thickness, color=line_color, fill=False, zorder=-1, ) ax.add_artist(circle) arc1 = Arc( (arc_points1[first], arc_points1[second]), height=x_max * 0.088 * 2, width=x_max * 0.088 * 2, angle=arc_angle, theta1=128.75, theta2=231.25, color=line_color, lw=line_thickness, zorder=-1, ) ax.add_artist(arc1) arc2 = Arc( (arc_points2[first], arc_points2[second]), height=x_max * 0.088 * 2, width=x_max * 0.088 * 2, angle=arc_angle, theta1=308.75, theta2=51.25, 
color=line_color, lw=line_thickness, zorder=-1, ) ax.add_artist(arc2) ax.set_aspect("equal") return ax # # # Plotting heatmaps # Before plotting the heatmaps let's separate the shots by situation, let's check the situations available within the data : print(barca_shoots.situation.unique()) # We'll ignore Penalties because it does not make sense to plot it, it's always the same starting point. open_play = barca_shoots[barca_shoots["situation"] == "OpenPlay"] free_kick = barca_shoots[barca_shoots["situation"] == "DirectFreekick"] corner = barca_shoots[barca_shoots["situation"] == "FromCorner"] set_piece = barca_shoots[barca_shoots["situation"] == "SetPiece"] # # ## Free kicks and Corners # Now finally let's plot the heatmaps, let's start by 2 heatmaps one for free kicks and one for Penalties fig = plt.figure(figsize=(15, 20), constrained_layout=True) gs = fig.add_gridspec(nrows=1, ncols=2) ax = fig.add_subplot(gs[0]) football_pitch(orientation="vertical", aspect="half", line_color="black", ax=ax) sns.kdeplot(free_kick["Y1"], free_kick["X1"], shade="True", color="cyan", levels=10) ax1 = fig.add_subplot(gs[1]) football_pitch(orientation="vertical", aspect="half", line_color="black", ax=ax1) sns.kdeplot(corner["Y1"], corner["X1"], shade="True", color="crimson", levels=10) fig_text( 0.55, 0.66, s="FC Barcelona Corners 2020/2021 Season", font="Comic Sans MS", fontsize=20, fontweight="bold", color="crimson", ) fig_text( 0.07, 0.66, s="FC Barcelona Free Kicks 2020/2021 Season", font="Comic Sans MS", fontsize=20, fontweight="bold", color="cyan", ) # # ## Messi Goals vs Total Shots # Now let's plot the goals scored by Messi and the total shots taken by him : messi = barca_shoots[barca_shoots["player"] == "Lionel Messi"] messi_goals = messi[messi["result"] == "Goal"] fig = plt.figure(figsize=(15, 20), constrained_layout=True) gs = fig.add_gridspec(nrows=1, ncols=2) ax = fig.add_subplot(gs[0]) football_pitch(orientation="vertical", aspect="half", line_color="black", ax=ax) sns.kdeplot(messi_goals["Y1"], messi_goals["X1"], shade="True", color="cyan", levels=10) ax1 = fig.add_subplot(gs[1]) football_pitch(orientation="vertical", aspect="half", line_color="black", ax=ax1) sns.kdeplot(messi["Y1"], messi["X1"], shade="True", color="crimson", levels=10) fig_text( 0.55, 0.66, s="Lionel Messi Goals 2020/2021 Season", font="Comic Sans MS", fontsize=20, fontweight="bold", color="crimson", ) fig_text( 0.07, 0.66, s="Lionel Messi Total Shots 2020/2021 Season", font="Comic Sans MS", fontsize=20, fontweight="bold", color="cyan", ) # # ## Pedri and Dembele Shots 2020/2021 Season # Let's go now to my other two favorite players Pedri and Dembele pedri = barca_shoots[barca_shoots["player"] == "Pedri"] dembele = barca_shoots[barca_shoots["player"] == "Ousmane Dembélé"] fig = plt.figure(figsize=(15, 20), constrained_layout=True) gs = fig.add_gridspec(nrows=1, ncols=2) ax = fig.add_subplot(gs[0]) football_pitch(orientation="vertical", aspect="half", line_color="black", ax=ax) sns.kdeplot(pedri["Y1"], pedri["X1"], shade="True", color="cyan", levels=10) ax1 = fig.add_subplot(gs[1]) football_pitch(orientation="vertical", aspect="half", line_color="black", ax=ax1) sns.kdeplot(dembele["Y1"], dembele["X1"], shade="True", color="crimson", levels=10) fig_text( 0.55, 0.66, s="Ousmane Dembele Total Shots 2020/2021 Season", font="Comic Sans MS", fontsize=20, fontweight="bold", color="crimson", ) fig_text( 0.07, 0.66, s="Pedri Gonzalez Total Shots 2020/2021 Season", font="Comic Sans MS", fontsize=20, fontweight="bold", color="cyan", ) 
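# A quick numeric companion to the heatmaps above (sketch, not from the original notebook): shot and goal
# counts per player for the three players plotted so far, using only the `player` and `result` columns already
# used in this notebook.
shot_summary = (
    barca_shoots[
        barca_shoots["player"].isin(["Lionel Messi", "Pedri", "Ousmane Dembélé"])
    ]
    .assign(goal=lambda d: (d["result"] == "Goal").astype(int))
    .groupby("player")
    .agg(shots=("result", "size"), goals=("goal", "sum"))
)
shot_summary["conversion_pct"] = (100 * shot_summary["goals"] / shot_summary["shots"]).round(1)
print(shot_summary)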
# Looking at Dembele's heatmap, his control of both sides of the pitch is remarkable, which fits his being ambidextrous. # ## All Barcelona shots last season overlaid # Finally, let's overlay every Barcelona shot, split by situation: fig, ax = plt.subplots(figsize=(10, 14)) football_pitch(orientation="vertical", aspect="half", line_color="black", ax=ax) # sns.kdeplot(free_kick["X1"],free_kick["Y1"], levels = 800, thresh = 0.3,shade=True,color="cyan") sns.kdeplot( data=barca_shoots, x="Y1", y="X1", hue="situation", fill=True, levels=10 )
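# Appendix sketch (not part of the original notebook): because the Understat player loop above takes around
# 10 minutes, the scraped shots could be cached to disk so that re-runs skip the scraping entirely.
# `scrape_all_shots` is a hypothetical wrapper around that loop, and the cache file name is arbitrary.
import os
import pandas as pd

SHOTS_CACHE = "understat_shots.csv"  # hypothetical local cache file


def load_or_scrape(scrape_all_shots, cache=SHOTS_CACHE):
    if os.path.exists(cache):
        return pd.read_csv(cache)
    frame = scrape_all_shots()  # e.g. a function wrapping the tqdm loop above
    frame.to_csv(cache, index=False)
    return frame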
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/678/69678179.ipynb
null
null
[{"Id": 69678179, "ScriptId": 19032374, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5717744, "CreationDate": "08/02/2021 18:01:08", "VersionNumber": 2.0, "Title": "# Understat Series : Heatmaps", "EvaluationDate": "08/02/2021", "IsChange": false, "TotalLines": 367.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 367.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
4,427
0
4,427
4,427
69678111
<jupyter_start><jupyter_text>Netflix Movies and TV Shows ### TV Shows and Movies listed on Netflix This dataset consists of tv shows and movies available on Netflix as of 2019. The dataset is collected from Flixable which is a third-party Netflix search engine. In 2018, they released an interesting [report](https://flixable.com/netflix-museum/) which shows that the number of TV shows on Netflix has nearly tripled since 2010. The streaming service’s number of movies has decreased by more than 2,000 titles since 2010, while its number of TV shows has nearly tripled. It will be interesting to explore what all other insights can be obtained from the same dataset. Integrating this dataset with other external datasets such as IMDB ratings, rotten tomatoes can also provide many interesting findings. ### Inspiration Some of the interesting questions (tasks) which can be performed on this dataset - 1. Understanding what content is available in different countries 2. Identifying similar content by matching text-based features 3. Network analysis of Actors / Directors and find interesting insights 4. Is Netflix has increasingly focusing on TV rather than movies in recent years. Kaggle dataset identifier: netflix-shows <jupyter_code>import pandas as pd df = pd.read_csv('netflix-shows/netflix_titles.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 8807 entries, 0 to 8806 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 show_id 8807 non-null object 1 type 8807 non-null object 2 title 8807 non-null object 3 director 6173 non-null object 4 cast 7982 non-null object 5 country 7976 non-null object 6 date_added 8797 non-null object 7 release_year 8807 non-null int64 8 rating 8803 non-null object 9 duration 8804 non-null object 10 listed_in 8807 non-null object 11 description 8807 non-null object dtypes: int64(1), object(11) memory usage: 825.8+ KB <jupyter_text>Examples: { "show_id": "s1", "type": "Movie", "title": "Dick Johnson Is Dead", "director": "Kirsten Johnson", "cast": null, "country": "United States", "date_added": "September 25, 2021", "release_year": 2020, "rating": "PG-13", "duration": "90 min", "listed_in": "Documentaries", "description": "As her father nears the end of his life, filmmaker Kirsten Johnson stages his death in inventive and comical ways to help them both face the inevitable." } { "show_id": "s2", "type": "TV Show", "title": "Blood & Water", "director": null, "cast": "Ama Qamata, Khosi Ngema, Gail Mabalane, Thabang Molaba, Dillon Windvogel, Natasha Thahane, Arno Greeff, Xolile Tshabalala, Getmore Sithole, Cindy Mahlangu, Ryle De Morny, Greteli Fincham, Sello Maake Ka-Ncube, Odwa Gwanya, Mekaila Mathys, Sandi Schultz, Duane Williams, Shamilla Miller, ...(truncated)", "country": "South Africa", "date_added": "September 24, 2021", "release_year": 2021, "rating": "TV-MA", "duration": "2 Seasons", "listed_in": "International TV Shows, TV Dramas, TV Mysteries", "description": "After crossing paths at a party, a Cape Town teen sets out to prove whether a private-school swimming star is her sister who was abducted at birth." 
} { "show_id": "s3", "type": "TV Show", "title": "Ganglands", "director": "Julien Leclercq", "cast": "Sami Bouajila, Tracy Gotoas, Samuel Jouy, Nabiha Akkari, Sofia Lesaffre, Salim Kechiouche, Noureddine Farihi, Geert Van Rampelberg, Bakary Diombera", "country": null, "date_added": "September 24, 2021", "release_year": 2021, "rating": "TV-MA", "duration": "1 Season", "listed_in": "Crime TV Shows, International TV Shows, TV Action & Adventure", "description": "To protect his family from a powerful drug lord, skilled thief Mehdi and his expert team of robbers are pulled into a violent and deadly turf war." } { "show_id": "s4", "type": "TV Show", "title": "Jailbirds New Orleans", "director": null, "cast": null, "country": null, "date_added": "September 24, 2021", "release_year": 2021, "rating": "TV-MA", "duration": "1 Season", "listed_in": "Docuseries, Reality TV", "description": "Feuds, flirtations and toilet talk go down among the incarcerated women at the Orleans Justice Center in New Orleans on this gritty reality series." } <jupyter_script># # Dataset Netflix e IMDb # ## Importando as principais Bibliotecas # import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px import plotly.graph_objects as go import warnings warnings.filterwarnings("ignore") # ou warnings.filterwarnings(action='once') # ## Importando as bases de dados # ### Base de dados do Netflix df = pd.read_csv("../input/netflix-shows/netflix_titles.csv") df.columns df.info() df.isna().sum() # ### Precisamos fazer algumas alterações no Dataframe # - Excluir a coluna 'Cast' e 'Description' # - Criar a coluna 'Year_added' apartir da coluna 'Date_added' # - Criar uma nova coluna "Season_count" no dataset para fixar a duração das series de TV,atráves de uma função Lambda # - Cortar da coluna "duration" os objetos que apresentam duração em temporadas # df.drop(["cast", "description", "director"], axis=1, inplace=True) df df["date_added"] = pd.to_datetime(df["date_added"]) df["year_added"] = df["date_added"].dt.year df["season_count"] = df.apply( lambda x: x["duration"].split(" ")[0] if "Season" in x["duration"] else "", axis=1 ) df["duration_movie"] = df.apply( lambda x: x["duration"].split(" ")[0] if "Season" not in x["duration"] else "", axis=1, ) df.head() df.info() # ### Extraindo o primeiro gênero listado e criando a coluna 'principal_genero' df["principal_genero"] = df["listed_in"].apply(lambda x: x.split(", ")[0]) df["principal_genero"].head() df.loc[20, "listed_in"] df.loc[20, "principal_genero"] df.isna().sum() # ### Alterando as classificações etárias para melhor entendimento ratings_ages = { "TV-PG": "Older Kids", "TV-MA": "Adults", "TV-Y7-FV": "Older Kids", "TV-Y7": "Older Kids", "TV-14": "Teens", "R": "Adults", "TV-Y": "Kids", "NR": "Adults", "PG-13": "Teens", "TV-G": "Kids", "PG": "Older Kids", "G": "Kids", "UR": "Adults", "NC-17": "Adults", } df["target_ages"] = df["rating"].replace(ratings_ages) df["target_ages"].unique() df.columns # ## Transformando dados categóricos e númericos df["type"] = pd.Categorical(df["type"]) df["target_ages"] = pd.Categorical( df["target_ages"], categories=["Kids", "Older Kids", "Teens", "Adults"] ) df["duration_movie"] = pd.to_numeric(df["duration_movie"]) df["year_added"] = pd.to_numeric(df["year_added"]) df.info() # ## Limpando dados faltantes df.isna().sum() # ### Usando o método mode() e utilizando a moda para preencher os valores ausentes df["country"] = 
df["country"].fillna(df["country"].mode()[0]) df["target_ages"] = df["target_ages"].fillna(df["target_ages"].mode()[0]) df["year_added"] = df["year_added"].fillna(df["year_added"].mode()[0]) df.isna().sum() df.drop( [ "date_added", "rating", "duration", "listed_in", ], axis=1, inplace=True, ) df.describe() df.isna().sum() # ### Criando a coluna do 'principal_country' que acomodará o principal país produtor df["principal_country"] = df["country"].apply(lambda x: x.split(",")[0]) df["principal_country"].head() # ## Separando o DataFrame de Movies e Tv Show's filmes_df = df[df["type"] == "Movie"] tv_show_df = df[df["type"] == "TV Show"] filme = len(filmes_df) tv_show = len(tv_show_df) # ## Começando a Análise Exploratória dos Dados # ### Como se divide o dataset entre Movies e Tv shows? df_types_ = df["type"].value_counts() df_types_ = pd.DataFrame(df_types_) df_types_ = df_types_.reset_index() df_types_ df_types_ = df_types_.rename({"index": "type", "type": "quantidade"}, axis=1) df_types_ df["type"].value_counts(normalize=True) fig = px.bar( df_types_, x="type", y="quantidade", color="type", title="Quantidade por tipo" ) fig.show() # ### Observações: # - Os filmes representam 69% do Dataset da Netflix # ### Como se comporta a produção de Movies e Tv Shows? # - released_year_df será todas as produções lançadas a partir de 2008 agrupadas por ano de lançamento e tipo. # - added_year_df será todas as produções adicionadas ao catálogo da Netdlix a partir de 2008 agrupadas por ano de lançamento e tipo. released_year_df = ( df.loc[df["release_year"] > 2007] .groupby(["release_year", "type"]) .agg({"show_id": "count"}) .reset_index() ) released_year_df added_year_df = ( df.loc[df["year_added"] > 2007] .groupby(["year_added", "type"]) .agg({"show_id": "count"}) .reset_index() ) added_year_df # ## Linha temporal de lançamento e adição de filmes a partir de 2008 fig = go.Figure() fig.add_trace( go.Scatter( x=released_year_df.loc[released_year_df["type"] == "Movie"]["release_year"], y=released_year_df.loc[released_year_df["type"] == "Movie"]["show_id"], mode="lines+markers", name="Movie: Released Year", marker_color="green", ) ) fig.add_trace( go.Scatter( x=released_year_df.loc[released_year_df["type"] == "TV Show"]["release_year"], y=released_year_df.loc[released_year_df["type"] == "TV Show"]["show_id"], mode="lines+markers", name="TV Show: Released Year", marker_color="Darkgreen", ) ) fig.add_trace( go.Scatter( x=added_year_df.loc[added_year_df["type"] == "TV Show"]["year_added"], y=added_year_df.loc[added_year_df["type"] == "TV Show"]["show_id"], mode="lines+markers", name="TV Show: Added Year", marker_color="blue", ) ) fig.add_trace( go.Scatter( x=added_year_df.loc[added_year_df["type"] == "Movie"]["year_added"], y=added_year_df.loc[added_year_df["type"] == "Movie"]["show_id"], mode="lines+markers", name="Movie: Added Year", marker_color="orange", ) ) # ### Os principais países produtores de conteúdo # - Os EUA sozinhos representam 43% do Dataset # - Para melhor analizarmos iremos dividir o dataset com e sem os EUA df["principal_country"].value_counts(normalize=True) main_pais_eua = df["principal_country"].value_counts().head(11).reset_index() fig = px.bar( main_pais_eua, x="index", y="principal_country", color="principal_country", color_continuous_scale="deep", title="10 maiores de produtores de conteúdo da Netflix", ) fig.show() main_pais = df["principal_country"].value_counts().head(11).reset_index()[1:] fig = px.bar( main_pais, x="index", y="principal_country", color="principal_country", 
color_continuous_scale="deep", title="10 maiores de produtores de conteúdo da Netflix sem os EUA", ) fig.show() # ### Como se divide o Dataset entre as Classificações etárias? No geral e entre 2012 e 2020? # - 'teste_df' representa o dataset 'df' agrupado pela classificação etária e pelo tipo # - 'teste_antes' representa o dataset 'df' agrupado pela classificação etária e pelo tipo adicionados em 2012 # - 'teste_depois' representa o dataset 'df' agrupado pela classificação etária e pelo tipo adicionados em 2020 teste_df = df.groupby(["target_ages", "type"]).agg({"show_id": "count"}).reset_index() teste_antes = ( df.loc[df["year_added"] == 2013] .groupby(["target_ages", "type"]) .agg({"show_id": "count"}) .reset_index() ) teste_depois = ( df.loc[df["year_added"] == 2020] .groupby(["target_ages", "type"]) .agg({"show_id": "count"}) .reset_index() ) teste_df g = sns.catplot( data=teste_df, kind="bar", x="target_ages", y="show_id", hue="type", ci="sd", palette="Reds", alpha=0.6, height=6, ) g.despine(left=True) g.set_axis_labels("target ages", "Número de produções") g.legend.set_title("Legenda:") g = sns.catplot( data=teste_antes, kind="bar", x="target_ages", y="show_id", hue="type", ci="sd", palette="Reds", alpha=0.6, height=6, ) g.despine(left=True) g.set_axis_labels("target ages 2013", "Número de produções 2013") g.legend.set_title("Legenda:") g = sns.catplot( data=teste_depois, kind="bar", x="target_ages", y="show_id", hue="type", ci="sd", palette="Reds", alpha=0.6, height=6, ) g.despine(left=True) g.set_axis_labels("target ages 2020", "Número de produções em 2020") g.legend.set_title("Legenda:") # ### Observações # - Os primeiros TV shows adicionados ao catálogo eram das categorias teens e Adults # - É próximo a proporção de TV shows e Movies no cenário de 2020 em comparaçã ao quadro geral # - Nos dois cenários é bastante próxima a produção de Movies e TV shows da categoria Kids # ## Principais gêneros listados no Dataset df.columns df["principal_genero"].value_counts(normalize=True).head(10) main_genero = pd.DataFrame(df["principal_genero"].value_counts().head(10).reset_index()) main_genero fig = px.bar( main_genero, x="index", y="principal_genero", color="principal_genero", color_continuous_scale="Reds", title="10 principais gêneros listados no Dataset", ) fig.show() # ### Observações # - Drama e Comédia são os gêneros mais famosos no dataset # - O gênero de comédia também pode ser considerado no gênero 'Stand-up Comedy' # ## Entre Movie e Tv Show's: filmes_df.head() tv_show_df.head() filme_pais = filmes_df["principal_country"].value_counts().head(11).reset_index() filme_pais fig = px.bar( filme_pais, x="index", y="principal_country", color="principal_country", color_continuous_scale="deep", title="10 maiores de produtores de filme da Netflix", ) fig.show() # ### Tirando os EUA fig = px.bar( filme_pais[2:], x="index", y="principal_country", color="principal_country", color_continuous_scale="deep", title="10 maiores de produtores de filme da Netflix", ) fig.show() # ## Como se distribuiu no tempo as classificações etárias? 
# - 'target_year_df' representa o dataset agrupado pelas produções adicionadas em 2012 e pelas suas classificações etárias # - 'target_year_release' representa o dataset agrupado pelo ano de lançamento e pela classificação etárias das produções # - Foi produzido dois gráficos para se avaliar o comportamento das produções de acordo com sua categória etária ao longo do tempo target_year_df = ( df.loc[df["year_added"] > 2013] .groupby(["year_added", "target_ages"]) .agg({"show_id": "count"}) .reset_index() ) target_year_df fig = go.Figure() fig.add_trace( go.Scatter( x=target_year_df.loc[target_year_df["target_ages"] == "Kids"]["year_added"], y=target_year_df.loc[target_year_df["target_ages"] == "Kids"]["show_id"], mode="lines+markers", name="Kids: Added Year", marker_color="green", ) ) fig.add_trace( go.Scatter( x=target_year_df.loc[target_year_df["target_ages"] == "Older Kids"][ "year_added" ], y=target_year_df.loc[target_year_df["target_ages"] == "Older Kids"]["show_id"], mode="lines+markers", name=" Older Kids: Added Year", marker_color="Darkgreen", ) ) fig.add_trace( go.Scatter( x=target_year_df.loc[target_year_df["target_ages"] == "Teens"]["year_added"], y=target_year_df.loc[target_year_df["target_ages"] == "Teens"]["show_id"], mode="lines+markers", name="Teens: Added Year", marker_color="blue", ) ) fig.add_trace( go.Scatter( x=target_year_df.loc[target_year_df["target_ages"] == "Adults"]["year_added"], y=target_year_df.loc[target_year_df["target_ages"] == "Adults"]["show_id"], mode="lines+markers", name="Adults: Added Year", marker_color="orange", ) ) target_year_release = ( df.groupby(["release_year", "target_ages"]).agg({"show_id": "count"}).reset_index() ) target_year_release fig = go.Figure() fig.add_trace( go.Scatter( x=target_year_release.loc[target_year_release["target_ages"] == "Kids"][ "release_year" ], y=target_year_release.loc[target_year_release["target_ages"] == "Kids"][ "show_id" ], mode="lines+markers", name="Kids: Added Year", marker_color="green", ) ) fig.add_trace( go.Scatter( x=target_year_release.loc[target_year_release["target_ages"] == "Older Kids"][ "release_year" ], y=target_year_release.loc[target_year_release["target_ages"] == "Older Kids"][ "show_id" ], mode="lines+markers", name=" Older Kids: Ano de Lançamento", marker_color="Darkgreen", ) ) fig.add_trace( go.Scatter( x=target_year_release.loc[target_year_release["target_ages"] == "Teens"][ "release_year" ], y=target_year_release.loc[target_year_release["target_ages"] == "Teens"][ "show_id" ], mode="lines+markers", name="Teens: Ano de Lançamento", marker_color="blue", ) ) fig.add_trace( go.Scatter( x=target_year_release.loc[target_year_release["target_ages"] == "Adults"][ "release_year" ], y=target_year_release.loc[target_year_release["target_ages"] == "Adults"][ "show_id" ], mode="lines+markers", name="Adults: Ano de Lançamento", marker_color="orange", ) ) fig.show() filmes_df filmes_df.columns # ### Crescimento ou decrescimento marginal da produção de Movies e TV show? 
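# The cells below track the absolute year-over-year change with .diff(); the relative
# growth rate can be read off the same grouped counts with .pct_change() instead.
# A minimal sketch reusing filmes_df and the year_added column created above:
filmes_added_per_year = (
    filmes_df.loc[filmes_df["year_added"] > 2007]
    .groupby("year_added")
    .agg({"show_id": "count"})
)
filmes_added_per_year.pct_change().plot(title="Relative yearly growth of movies added")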
filmes_df_ = ( filmes_df.loc[filmes_df["release_year"] > 2007] .groupby("year_added") .agg({"show_id": "count"}) ) filmes_df_.diff().plot() filmes_df_1 = ( filmes_df.loc[filmes_df["release_year"] > 2008] .groupby("release_year") .agg({"show_id": "count"}) ) filmes_df_1.diff().plot() tv_show_df tv_show_df_ = ( tv_show_df.loc[tv_show_df["release_year"] > 2007] .groupby("year_added") .agg({"show_id": "count"}) ) tv_show_df_ tv_show_df_.diff().plot() # ## Diferença entre ano de lançamento e ano de adição no catálogo do Netflix df_teste_1 = df[["title", "release_year", "year_added"]] df_diff = df_teste_1.set_index("title").diff(axis=1) df_diff df_diff df_treino = df[["title", "release_year", "year_added"]] # ## Dos filmes lançados a partir de 2008 qual demorou mais anos para ser adicionado no catálogo da Netflix? target_df = df.loc[df["release_year"] == 2008] target_df = target_df[["title", "release_year", "year_added"]] target_df_diff = target_df.set_index("title").diff(axis=1) target_df_diff.max() # ### Os 10 filmes que mais demoraram para ser adicionados no catálogo da Netflix target_df_diff["year_added"].sort_values(ascending=False).head(10) df.set_index("title").loc["Jaane Tu... Ya Jaane Na"] # # Juntando os datasets do Netflix e do IMDb # ## Anexando o Dataset do IMDB # - Utilizando apenas as colunas : "weighted_average_vote","total_votes" do arquivo "ratings.csv" # - Utilizando apenas as colunas : 'title','year','genre' do arquivo "movies.csv" # imdb_rating = pd.read_csv( "../input/imdb-extensive-dataset/IMDb ratings.csv", usecols=["weighted_average_vote", "total_votes"], ) imdb_rating["total_votes"] imdb_titles = pd.read_csv( "../input/imdb-extensive-dataset/IMDb movies.csv", usecols=["title", "year", "genre"], ) imdb_titles.columns # ### Criando o DataFrame ratings_imdb ratings_imdb = pd.DataFrame( { "Title": imdb_titles["title"], "Release Year": imdb_titles["year"], "Rating": imdb_rating["weighted_average_vote"], "Total_votes": imdb_rating["total_votes"], "Genre": imdb_titles["genre"], } ) ratings_imdb.sort_values("Total_votes", ascending=False).reset_index() len(ratings_imdb) len(df) # ### Juntando com o método 'merge' os dois dataframes joint_data = ratings_imdb.merge(df, left_on="Title", right_on="title", how="inner") len(joint_data) joint_data.isna().sum() joint_data.set_index("Title").sort_values("Rating", ascending=False) # ### Separando os Dataframes de Filmes e TV Shows filmes_joint = joint_data[joint_data["type"] == "Movie"] tv_show_joint = joint_data[joint_data["type"] == "TV Show"] len(tv_show_joint) len(filmes_joint) tv_show_joint.set_index("Title").sort_values("Rating", ascending=False).head(10) filmes_joint.columns # ### Excluindo colunas que não serão utilizadas filmes_joint.drop( [ "title", "country", "Release Year", "Genre", "type", ], axis=1, inplace=True, ) filmes_joint.drop(["season_count"], axis=1, inplace=True) filmes_joint.set_index("Title").sort_values("Rating", ascending=False) filmes_joint.describe().round(2).T # ### Atribuindo classes ao número de votantes # - No dataset há filmes avaliados por mais de 1 milhão de usuários; # - Por outro há filmes avaliados por apenas 100 usuários; # - Seria justo avaliar as notas de produções com diferenças tão discrepantes de avaliações? 
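# One common way to compare titles with very different vote counts is an IMDb-style
# weighted (Bayesian average) rating, WR = v/(v+m)*R + m/(v+m)*C, where R is the title
# rating, v its vote count, C the mean rating over all titles and m a minimum-votes
# threshold. A minimal sketch: m = 25000 is only an illustrative cutoff, and Rating /
# Total_votes are the columns built above:
C = filmes_joint["Rating"].mean()
m = 25000
filmes_joint["weighted_rating"] = (
    filmes_joint["Total_votes"] / (filmes_joint["Total_votes"] + m) * filmes_joint["Rating"]
    + m / (filmes_joint["Total_votes"] + m) * C
)
filmes_joint.sort_values("weighted_rating", ascending=False).head(10)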
valor_minimo = filmes_joint["Total_votes"].min() valor_maximo = filmes_joint["Total_votes"].max() classes = [valor_minimo, 50000, 500000, 1000000, valor_maximo] rotulos = ["votacao_D", "votacao_C", "votacao_B", "votacao_A"] filmes_joint["votacao_label"] = pd.cut( x=filmes_joint["Total_votes"], bins=classes, labels=rotulos, include_lowest=True ) filmes_joint.set_index("Title").sort_values("Rating", ascending=False) filmes_joint.groupby("votacao_label").describe().round(2) # ### Tv show valor_minimo = tv_show_joint["Total_votes"].min() valor_maximo = tv_show_joint["Total_votes"].max() valor_minimo valor_maximo tv_show_joint.describe().round(2).T classes_tv = [valor_minimo, 50000, 300000, valor_maximo] rotulos_tv = ["votacao_C", "votacao_B", "votacao_A"] tv_show_joint["votacao_label"] = pd.cut( x=tv_show_joint["Total_votes"], bins=classes_tv, labels=rotulos_tv, include_lowest=True, ) tv_show_joint.set_index("Title").sort_values("Rating", ascending=False) # ## Análise Exploratória dos dados filmes_joint.set_index("Title").sort_values("Rating", ascending=True).head(20) ax1 = sns.distplot(filmes_joint["Rating"], norm_hist=True) ax1.figure.set_size_inches(16, 8) ax1.set_title( "Distribuição das notas", fontsize=18, ) ax1.set_xlabel("Rating", fontsize=14) ax1 = 0 import plotly.express as px fig = px.box(filmes_joint, x="Rating", orientation="h", title="Boxplor das notas IMDb") fig.show() sns.countplot(y="principal_genero", data=filmes_joint) filmes_top = ( filmes_joint.set_index("Title").sort_values("Rating", ascending=False).head(10) ) filmes_top sns.countplot(y="principal_genero", data=filmes_top) filmes_top filmes_D = filmes_joint.loc[filmes_joint["votacao_label"] == "votacao_D"] filmes_C = filmes_joint.loc[filmes_joint["votacao_label"] == "votacao_C"] filmes_B = filmes_joint.loc[filmes_joint["votacao_label"] == "votacao_B"] filmes_A = filmes_joint.loc[filmes_joint["votacao_label"] == "votacao_A"] sns.set(style="darkgrid") sns.kdeplot(data=filmes_A["Rating"]) filmes_A.describe().round(2).T filmes_D.describe().round(2).T filmes_B.describe().round(2).T # ### Curva de densidade da duração de Movies # - A curva de duração dos Movies se assemelha a uma curva normal, mas assimetrica à direita sns.set(style="darkgrid") sns.kdeplot(data=filmes_joint["duration_movie"])
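# The right skew noted above can be quantified directly; a small sketch using the
# sample skewness of the duration distribution (a positive value confirms the
# asymmetry to the right):
duration_skew = filmes_joint["duration_movie"].skew()
print("Skewness of movie duration:", round(duration_skew, 2))
print(
    "Mean duration:", round(filmes_joint["duration_movie"].mean(), 1),
    "| Median duration:", filmes_joint["duration_movie"].median(),
)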
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/678/69678111.ipynb
netflix-shows
shivamb
[{"Id": 69678111, "ScriptId": 19037014, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7085276, "CreationDate": "08/02/2021 18:00:33", "VersionNumber": 1.0, "Title": "NETFLIX and IMDb EDA! Let's learning together!", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 619.0, "LinesInsertedFromPrevious": 619.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 93147240, "KernelVersionId": 69678111, "SourceDatasetVersionId": 1856944}]
[{"Id": 1856944, "DatasetId": 434238, "DatasourceVersionId": 1894798, "CreatorUserId": 1571785, "LicenseName": "CC0: Public Domain", "CreationDate": "01/18/2021 16:20:26", "VersionNumber": 4.0, "Title": "Netflix Movies and TV Shows", "Slug": "netflix-shows", "Subtitle": "Listings of movies and tv shows on Netflix - Regularly Updated", "Description": "### TV Shows and Movies listed on Netflix\n\nThis dataset consists of tv shows and movies available on Netflix as of 2019. The dataset is collected from Flixable which is a third-party Netflix search engine. \n\nIn 2018, they released an interesting [report](https://flixable.com/netflix-museum/) which shows that the number of TV shows on Netflix has nearly tripled since 2010. The streaming service\u2019s number of movies has decreased by more than 2,000 titles since 2010, while its number of TV shows has nearly tripled. It will be interesting to explore what all other insights can be obtained from the same dataset. \n\nIntegrating this dataset with other external datasets such as IMDB ratings, rotten tomatoes can also provide many interesting findings. \n\n### Inspiration\n\nSome of the interesting questions (tasks) which can be performed on this dataset - \n\n1. Understanding what content is available in different countries\n2. Identifying similar content by matching text-based features \n3. Network analysis of Actors / Directors and find interesting insights \n4. Is Netflix has increasingly focusing on TV rather than movies in recent years.", "VersionNotes": "Updated till 2021", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 434238, "CreatorUserId": 1571785, "OwnerUserId": 1571785.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2654038.0, "CurrentDatasourceVersionId": 2698094.0, "ForumId": 446914, "Type": 2, "CreationDate": "12/04/2019 05:57:54", "LastActivityDate": "12/04/2019", "TotalViews": 2438410, "TotalDownloads": 358670, "TotalVotes": 7671, "TotalKernels": 1385}]
[{"Id": 1571785, "UserName": "shivamb", "DisplayName": "Shivam Bansal", "RegisterDate": "01/22/2018", "PerformanceTier": 4}]
[{"netflix-shows/netflix_titles.csv": {"column_names": "[\"show_id\", \"type\", \"title\", \"director\", \"cast\", \"country\", \"date_added\", \"release_year\", \"rating\", \"duration\", \"listed_in\", \"description\"]", "column_data_types": "{\"show_id\": \"object\", \"type\": \"object\", \"title\": \"object\", \"director\": \"object\", \"cast\": \"object\", \"country\": \"object\", \"date_added\": \"object\", \"release_year\": \"int64\", \"rating\": \"object\", \"duration\": \"object\", \"listed_in\": \"object\", \"description\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 8807 entries, 0 to 8806\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 show_id 8807 non-null object\n 1 type 8807 non-null object\n 2 title 8807 non-null object\n 3 director 6173 non-null object\n 4 cast 7982 non-null object\n 5 country 7976 non-null object\n 6 date_added 8797 non-null object\n 7 release_year 8807 non-null int64 \n 8 rating 8803 non-null object\n 9 duration 8804 non-null object\n 10 listed_in 8807 non-null object\n 11 description 8807 non-null object\ndtypes: int64(1), object(11)\nmemory usage: 825.8+ KB\n", "summary": "{\"release_year\": {\"count\": 8807.0, \"mean\": 2014.1801975701146, \"std\": 8.819312130833966, \"min\": 1925.0, \"25%\": 2013.0, \"50%\": 2017.0, \"75%\": 2019.0, \"max\": 2021.0}}", "examples": "{\"show_id\":{\"0\":\"s1\",\"1\":\"s2\",\"2\":\"s3\",\"3\":\"s4\"},\"type\":{\"0\":\"Movie\",\"1\":\"TV Show\",\"2\":\"TV Show\",\"3\":\"TV Show\"},\"title\":{\"0\":\"Dick Johnson Is Dead\",\"1\":\"Blood & Water\",\"2\":\"Ganglands\",\"3\":\"Jailbirds New Orleans\"},\"director\":{\"0\":\"Kirsten Johnson\",\"1\":null,\"2\":\"Julien Leclercq\",\"3\":null},\"cast\":{\"0\":null,\"1\":\"Ama Qamata, Khosi Ngema, Gail Mabalane, Thabang Molaba, Dillon Windvogel, Natasha Thahane, Arno Greeff, Xolile Tshabalala, Getmore Sithole, Cindy Mahlangu, Ryle De Morny, Greteli Fincham, Sello Maake Ka-Ncube, Odwa Gwanya, Mekaila Mathys, Sandi Schultz, Duane Williams, Shamilla Miller, Patrick Mofokeng\",\"2\":\"Sami Bouajila, Tracy Gotoas, Samuel Jouy, Nabiha Akkari, Sofia Lesaffre, Salim Kechiouche, Noureddine Farihi, Geert Van Rampelberg, Bakary Diombera\",\"3\":null},\"country\":{\"0\":\"United States\",\"1\":\"South Africa\",\"2\":null,\"3\":null},\"date_added\":{\"0\":\"September 25, 2021\",\"1\":\"September 24, 2021\",\"2\":\"September 24, 2021\",\"3\":\"September 24, 2021\"},\"release_year\":{\"0\":2020,\"1\":2021,\"2\":2021,\"3\":2021},\"rating\":{\"0\":\"PG-13\",\"1\":\"TV-MA\",\"2\":\"TV-MA\",\"3\":\"TV-MA\"},\"duration\":{\"0\":\"90 min\",\"1\":\"2 Seasons\",\"2\":\"1 Season\",\"3\":\"1 Season\"},\"listed_in\":{\"0\":\"Documentaries\",\"1\":\"International TV Shows, TV Dramas, TV Mysteries\",\"2\":\"Crime TV Shows, International TV Shows, TV Action & Adventure\",\"3\":\"Docuseries, Reality TV\"},\"description\":{\"0\":\"As her father nears the end of his life, filmmaker Kirsten Johnson stages his death in inventive and comical ways to help them both face the inevitable.\",\"1\":\"After crossing paths at a party, a Cape Town teen sets out to prove whether a private-school swimming star is her sister who was abducted at birth.\",\"2\":\"To protect his family from a powerful drug lord, skilled thief Mehdi and his expert team of robbers are pulled into a violent and deadly turf war.\",\"3\":\"Feuds, flirtations and toilet talk go down among the incarcerated women at the Orleans Justice Center in New Orleans on this 
gritty reality series.\"}}"}}]
true
3
<start_data_description><data_path>netflix-shows/netflix_titles.csv: <column_names> ['show_id', 'type', 'title', 'director', 'cast', 'country', 'date_added', 'release_year', 'rating', 'duration', 'listed_in', 'description'] <column_types> {'show_id': 'object', 'type': 'object', 'title': 'object', 'director': 'object', 'cast': 'object', 'country': 'object', 'date_added': 'object', 'release_year': 'int64', 'rating': 'object', 'duration': 'object', 'listed_in': 'object', 'description': 'object'} <dataframe_Summary> {'release_year': {'count': 8807.0, 'mean': 2014.1801975701146, 'std': 8.819312130833966, 'min': 1925.0, '25%': 2013.0, '50%': 2017.0, '75%': 2019.0, 'max': 2021.0}} <dataframe_info> RangeIndex: 8807 entries, 0 to 8806 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 show_id 8807 non-null object 1 type 8807 non-null object 2 title 8807 non-null object 3 director 6173 non-null object 4 cast 7982 non-null object 5 country 7976 non-null object 6 date_added 8797 non-null object 7 release_year 8807 non-null int64 8 rating 8803 non-null object 9 duration 8804 non-null object 10 listed_in 8807 non-null object 11 description 8807 non-null object dtypes: int64(1), object(11) memory usage: 825.8+ KB <some_examples> {'show_id': {'0': 's1', '1': 's2', '2': 's3', '3': 's4'}, 'type': {'0': 'Movie', '1': 'TV Show', '2': 'TV Show', '3': 'TV Show'}, 'title': {'0': 'Dick Johnson Is Dead', '1': 'Blood & Water', '2': 'Ganglands', '3': 'Jailbirds New Orleans'}, 'director': {'0': 'Kirsten Johnson', '1': None, '2': 'Julien Leclercq', '3': None}, 'cast': {'0': None, '1': 'Ama Qamata, Khosi Ngema, Gail Mabalane, Thabang Molaba, Dillon Windvogel, Natasha Thahane, Arno Greeff, Xolile Tshabalala, Getmore Sithole, Cindy Mahlangu, Ryle De Morny, Greteli Fincham, Sello Maake Ka-Ncube, Odwa Gwanya, Mekaila Mathys, Sandi Schultz, Duane Williams, Shamilla Miller, Patrick Mofokeng', '2': 'Sami Bouajila, Tracy Gotoas, Samuel Jouy, Nabiha Akkari, Sofia Lesaffre, Salim Kechiouche, Noureddine Farihi, Geert Van Rampelberg, Bakary Diombera', '3': None}, 'country': {'0': 'United States', '1': 'South Africa', '2': None, '3': None}, 'date_added': {'0': 'September 25, 2021', '1': 'September 24, 2021', '2': 'September 24, 2021', '3': 'September 24, 2021'}, 'release_year': {'0': 2020, '1': 2021, '2': 2021, '3': 2021}, 'rating': {'0': 'PG-13', '1': 'TV-MA', '2': 'TV-MA', '3': 'TV-MA'}, 'duration': {'0': '90 min', '1': '2 Seasons', '2': '1 Season', '3': '1 Season'}, 'listed_in': {'0': 'Documentaries', '1': 'International TV Shows, TV Dramas, TV Mysteries', '2': 'Crime TV Shows, International TV Shows, TV Action & Adventure', '3': 'Docuseries, Reality TV'}, 'description': {'0': 'As her father nears the end of his life, filmmaker Kirsten Johnson stages his death in inventive and comical ways to help them both face the inevitable.', '1': 'After crossing paths at a party, a Cape Town teen sets out to prove whether a private-school swimming star is her sister who was abducted at birth.', '2': 'To protect his family from a powerful drug lord, skilled thief Mehdi and his expert team of robbers are pulled into a violent and deadly turf war.', '3': 'Feuds, flirtations and toilet talk go down among the incarcerated women at the Orleans Justice Center in New Orleans on this gritty reality series.'}} <end_description>
6,659
0
8,111
6,659
69678067
<jupyter_start><jupyter_text>RETAIL ANALYSIS WITH WALMART SALES DATA Historical sales data for 45 Walmart stores located in different regions are available. There are certain events and holidays which impact sales on each day. The business is facing a challenge due to unforeseen demands and runs out of stock some times, due to inappropriate machine learning algorithm. Walmart would like to predict the sales and demand accurately. An ideal ML algorithm will predict demand accurately and ingest factors like economic conditions including CPI, Unemployment Index, etc. The objective is to determine the factors affecting the sales and to analyze the impact of markdowns around holidays on the sales. **Holiday Events** Super Bowl: 12-Feb-10, 11-Feb-11, 10-Feb-12, 8-Feb-13 Labour Day: 10-Sep-10, 9-Sep-11, 7-Sep-12, 6-Sep-13 Thanksgiving: 26-Nov-10, 25-Nov-11, 23-Nov-12, 29-Nov-13 Christmas: 31-Dec-10, 30-Dec-11, 28-Dec-12, 27-Dec-13 **Analysis Tasks** **Basic Statistics tasks** 1) Which store has maximum sales 2) Which store has maximum standard deviation i.e., the sales vary a lot. Also, find out the coefficient of mean to standard deviation 3) Which store/s has good quarterly growth rate in Q3’2012 4) Some holidays have a negative impact on sales. Find out holidays which have higher sales than the mean sales in non-holiday season for all stores together 5) Provide a monthly and semester view of sales in units and give insights **Statistical Model** For Store 1 – Build prediction models to forecast demand (Linear Regression – Utilize variables like date and restructure dates as 1 for 5 Feb 2010 (starting from the earliest date in order). Hypothesize if CPI, unemployment, and fuel price have any impact on sales.) Change dates into days by creating new variable. Select the model which gives best accuracy. Kaggle dataset identifier: retail-analysis-with-walmart-sales-data <jupyter_code>import pandas as pd df = pd.read_csv('retail-analysis-with-walmart-sales-data/WALMART_SALES_DATA.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 6435 entries, 0 to 6434 Data columns (total 8 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Store 6435 non-null int64 1 Date 6435 non-null object 2 Weekly_Sales 6435 non-null float64 3 Holiday_Flag 6435 non-null int64 4 Temperature 6435 non-null float64 5 Fuel_Price 6435 non-null float64 6 CPI 6435 non-null float64 7 Unemployment 6435 non-null float64 dtypes: float64(5), int64(2), object(1) memory usage: 402.3+ KB <jupyter_text>Examples: { "Store": 1, "Date": "2010-05-02 00:00:00", "Weekly_Sales": 1643690.9, "Holiday_Flag": 0, "Temperature": 42.31, "Fuel_Price": 2.572, "CPI": 211.0963582, "Unemployment": 8.106 } { "Store": 1, "Date": "2010-12-02 00:00:00", "Weekly_Sales": 1641957.44, "Holiday_Flag": 1, "Temperature": 38.51, "Fuel_Price": 2.548, "CPI": 211.2421698, "Unemployment": 8.106 } { "Store": 1, "Date": "2010-02-19 00:00:00", "Weekly_Sales": 1611968.17, "Holiday_Flag": 0, "Temperature": 39.93, "Fuel_Price": 2.5140000000000002, "CPI": 211.2891429, "Unemployment": 8.106 } { "Store": 1, "Date": "2010-02-26 00:00:00", "Weekly_Sales": 1409727.59, "Holiday_Flag": 0, "Temperature": 46.63, "Fuel_Price": 2.561, "CPI": 211.3196429, "Unemployment": 8.106 } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
    r"/kaggle/input/retail-analysis-with-walmart-sales-data/WALMART_SALES_DATA.csv"
)
df

# # **EDA**
import matplotlib.pyplot as plt
import seaborn as sns

sns.heatmap(df.isnull(), yticklabels=False, cbar=False)
df.columns
df.describe()
sns.heatmap(df.corr(), annot=True)
sns.scatterplot(
    x="Weekly_Sales",
    y="CPI",
    palette="ch:r=-.2,d=.3_r",
    hue="Holiday_Flag",
    sizes=(1, 8),
    linewidth=0,
    data=df,
)
sns.pairplot(df)
df["Date"]


def ss(d):
    # Date is stored as "DD-MM-YYYY"; return the day component as an integer.
    k = d.split("-")
    return int(k[0])


df["Day"] = df["Date"].apply(ss)
# Parse with dayfirst=True so that "05-02-2010" is read as 5 Feb 2010, not May 2.
df["Year"] = pd.to_datetime(df["Date"], dayfirst=True).dt.year
df["Month"] = pd.to_datetime(df["Date"], dayfirst=True).dt.month
# Lowercase "year"/"month" columns are never created above, so guard the drop with
# errors="ignore" to avoid a KeyError.
df.drop(["year", "month"], inplace=True, axis=1, errors="ignore")
df
plt.figure(figsize=(10, 10))
sns.heatmap(df.corr(), annot=True)
df.drop("Day", axis=1, inplace=True)
plt.figure(figsize=(20, 20))
df[["Temperature", "Weekly_Sales"]].hist()
sns.relplot(x=df["Store"], y=df["Weekly_Sales"], kind="line")
df1 = pd.DataFrame()
df1["Store"] = df["Store"].unique()
df[df["Store"] == 1]["Weekly_Sales"].sum()


def tot_sales(d):
    # Total weekly sales for a single store.
    return df[df["Store"] == d]["Weekly_Sales"].sum()


df1["Total_Sale"] = df1["Store"].apply(
    lambda x: df[df["Store"] == x]["Weekly_Sales"].sum()
)
df1[df1["Total_Sale"] == df1["Total_Sale"].max()]
plt.figure(figsize=(12, 6))
sns.barplot(x=df1["Store"], y=df1["Total_Sale"])
plt.tight_layout()
sns.relplot(x=df1["Store"], y=df1["Total_Sale"], kind="line")
# Store 20 has the maximum total sales
df1["STD"] = df1["Store"].apply(lambda x: df[df["Store"] == x]["Weekly_Sales"].std())
plt.figure(figsize=(12, 6))
sns.barplot(x=df1["Store"], y=df1["STD"])
plt.tight_layout()
# Store 14 has the maximum standard deviation, i.e. its weekly sales vary the most


def goodQ312(d):
    # Quarterly growth rate in Q3'2012 for one store: (Q3 sales - Q2 sales) / Q2 sales.
    f = df[
        (df["Store"] == d)
        & (df["Year"] == 2012)
        & (df["Month"] > 6)
        & (df["Month"] < 10)
    ]["Weekly_Sales"].sum()
    p = df[
        (df["Store"] == d)
        & (df["Year"] == 2012)
        & (df["Month"] > 3)
        & (df["Month"] < 7)
    ]["Weekly_Sales"].sum()
    return (f - p) / p


df1["GrowthR"] = df1["Store"].apply(goodQ312)
plt.figure(figsize=(12, 6))
sns.barplot(x=df1["Store"], y=df1["GrowthR"])
plt.tight_layout()
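# The dataset description above also asks for the coefficient of mean to standard
# deviation (the coefficient of variation) and for a holiday vs. non-holiday
# comparison; a minimal sketch reusing df and the df1 summary frame built above:
df1["Mean_Sale"] = df1["Store"].apply(
    lambda x: df[df["Store"] == x]["Weekly_Sales"].mean()
)
df1["CV"] = df1["STD"] / df1["Mean_Sale"]
df1.sort_values("CV", ascending=False).head(5)

# Mean weekly sales in holiday weeks vs. non-holiday weeks, all stores together.
holiday_mean = df[df["Holiday_Flag"] == 1]["Weekly_Sales"].mean()
non_holiday_mean = df[df["Holiday_Flag"] == 0]["Weekly_Sales"].mean()
print("Holiday mean:", round(holiday_mean, 2), "| Non-holiday mean:", round(non_holiday_mean, 2))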
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/678/69678067.ipynb
retail-analysis-with-walmart-sales-data
rutuspatel
[{"Id": 69678067, "ScriptId": 18976593, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5951123, "CreationDate": "08/02/2021 18:00:13", "VersionNumber": 3.0, "Title": "Walmart", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 115.0, "LinesInsertedFromPrevious": 57.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 58.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 93147171, "KernelVersionId": 69678067, "SourceDatasetVersionId": 2483292}]
[{"Id": 2483292, "DatasetId": 1503066, "DatasourceVersionId": 2525840, "CreatorUserId": 2727849, "LicenseName": "Unknown", "CreationDate": "07/31/2021 09:04:18", "VersionNumber": 1.0, "Title": "RETAIL ANALYSIS WITH WALMART SALES DATA", "Slug": "retail-analysis-with-walmart-sales-data", "Subtitle": "Determine factors affecting the sales.", "Description": "Historical sales data for 45 Walmart stores located in different regions are available. There\nare certain events and holidays which impact sales on each day. The business is facing a\nchallenge due to unforeseen demands and runs out of stock some times, due to\ninappropriate machine learning algorithm. Walmart would like to predict the sales and\ndemand accurately. An ideal ML algorithm will predict demand accurately and ingest\nfactors like economic conditions including CPI, Unemployment Index, etc. The objective is\nto determine the factors affecting the sales and to analyze the impact of markdowns\naround holidays on the sales.\n\n**Holiday Events**\nSuper Bowl: 12-Feb-10, 11-Feb-11, 10-Feb-12, 8-Feb-13\nLabour Day: 10-Sep-10, 9-Sep-11, 7-Sep-12, 6-Sep-13\nThanksgiving: 26-Nov-10, 25-Nov-11, 23-Nov-12, 29-Nov-13\nChristmas: 31-Dec-10, 30-Dec-11, 28-Dec-12, 27-Dec-13\n\n**Analysis Tasks**\n\n**Basic Statistics tasks**\n1) Which store has maximum sales\n\n2) Which store has maximum standard deviation i.e., the sales vary a lot. Also, find out the coefficient of mean to standard deviation\n\n3) Which store/s has good quarterly growth rate in Q3\u20192012\n\n4) Some holidays have a negative impact on sales. Find out holidays which have higher sales than the mean sales in non-holiday season for all stores together\n\n5) Provide a monthly and semester view of sales in units and give insights\n\n**Statistical Model**\n For Store 1 \u2013 Build prediction models to forecast demand (Linear Regression \u2013 Utilize variables like date and restructure dates as 1 for 5 Feb 2010 (starting from the earliest date in order). Hypothesize if CPI, unemployment, and fuel price have any impact on sales.) Change dates into days by creating new variable.\nSelect the model which gives best accuracy.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1503066, "CreatorUserId": 2727849, "OwnerUserId": 2727849.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2483292.0, "CurrentDatasourceVersionId": 2525840.0, "ForumId": 1522805, "Type": 2, "CreationDate": "07/31/2021 09:04:18", "LastActivityDate": "07/31/2021", "TotalViews": 30516, "TotalDownloads": 3741, "TotalVotes": 34, "TotalKernels": 9}]
[{"Id": 2727849, "UserName": "rutuspatel", "DisplayName": "Rutu Patel", "RegisterDate": "01/21/2019", "PerformanceTier": 1}]
[{"retail-analysis-with-walmart-sales-data/WALMART_SALES_DATA.csv": {"column_names": "[\"Store\", \"Date\", \"Weekly_Sales\", \"Holiday_Flag\", \"Temperature\", \"Fuel_Price\", \"CPI\", \"Unemployment\"]", "column_data_types": "{\"Store\": \"int64\", \"Date\": \"object\", \"Weekly_Sales\": \"float64\", \"Holiday_Flag\": \"int64\", \"Temperature\": \"float64\", \"Fuel_Price\": \"float64\", \"CPI\": \"float64\", \"Unemployment\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 6435 entries, 0 to 6434\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Store 6435 non-null int64 \n 1 Date 6435 non-null object \n 2 Weekly_Sales 6435 non-null float64\n 3 Holiday_Flag 6435 non-null int64 \n 4 Temperature 6435 non-null float64\n 5 Fuel_Price 6435 non-null float64\n 6 CPI 6435 non-null float64\n 7 Unemployment 6435 non-null float64\ndtypes: float64(5), int64(2), object(1)\nmemory usage: 402.3+ KB\n", "summary": "{\"Store\": {\"count\": 6435.0, \"mean\": 23.0, \"std\": 12.988182381175474, \"min\": 1.0, \"25%\": 12.0, \"50%\": 23.0, \"75%\": 34.0, \"max\": 45.0}, \"Weekly_Sales\": {\"count\": 6435.0, \"mean\": 1046964.8775617715, \"std\": 564366.6220536975, \"min\": 209986.25, \"25%\": 553350.105, \"50%\": 960746.04, \"75%\": 1420158.66, \"max\": 3818686.45}, \"Holiday_Flag\": {\"count\": 6435.0, \"mean\": 0.06993006993006994, \"std\": 0.25504894436982795, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"Temperature\": {\"count\": 6435.0, \"mean\": 60.66378243978244, \"std\": 18.44493287581158, \"min\": -2.06, \"25%\": 47.46, \"50%\": 62.67, \"75%\": 74.94, \"max\": 100.14}, \"Fuel_Price\": {\"count\": 6435.0, \"mean\": 3.358606837606838, \"std\": 0.4590197071928525, \"min\": 2.472, \"25%\": 2.933, \"50%\": 3.445, \"75%\": 3.735, \"max\": 4.468}, \"CPI\": {\"count\": 6435.0, \"mean\": 171.57839384878014, \"std\": 39.35671229566418, \"min\": 126.064, \"25%\": 131.735, \"50%\": 182.6165205, \"75%\": 212.74329345, \"max\": 227.2328068}, \"Unemployment\": {\"count\": 6435.0, \"mean\": 7.99915104895105, \"std\": 1.8758847818627977, \"min\": 3.879, \"25%\": 6.891, \"50%\": 7.874, \"75%\": 8.622, \"max\": 14.313}}", "examples": "{\"Store\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\"Date\":{\"0\":\"05-02-2010\",\"1\":\"12-02-2010\",\"2\":\"19-02-2010\",\"3\":\"26-02-2010\"},\"Weekly_Sales\":{\"0\":1643690.8999999999,\"1\":1641957.4399999999,\"2\":1611968.1699999999,\"3\":1409727.5900000001},\"Holiday_Flag\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"Temperature\":{\"0\":42.31,\"1\":38.51,\"2\":39.93,\"3\":46.63},\"Fuel_Price\":{\"0\":2.572,\"1\":2.548,\"2\":2.514,\"3\":2.561},\"CPI\":{\"0\":211.0963582,\"1\":211.2421698,\"2\":211.2891429,\"3\":211.3196429},\"Unemployment\":{\"0\":8.106,\"1\":8.106,\"2\":8.106,\"3\":8.106}}"}}]
true
1
<start_data_description><data_path>retail-analysis-with-walmart-sales-data/WALMART_SALES_DATA.csv: <column_names> ['Store', 'Date', 'Weekly_Sales', 'Holiday_Flag', 'Temperature', 'Fuel_Price', 'CPI', 'Unemployment'] <column_types> {'Store': 'int64', 'Date': 'object', 'Weekly_Sales': 'float64', 'Holiday_Flag': 'int64', 'Temperature': 'float64', 'Fuel_Price': 'float64', 'CPI': 'float64', 'Unemployment': 'float64'} <dataframe_Summary> {'Store': {'count': 6435.0, 'mean': 23.0, 'std': 12.988182381175474, 'min': 1.0, '25%': 12.0, '50%': 23.0, '75%': 34.0, 'max': 45.0}, 'Weekly_Sales': {'count': 6435.0, 'mean': 1046964.8775617715, 'std': 564366.6220536975, 'min': 209986.25, '25%': 553350.105, '50%': 960746.04, '75%': 1420158.66, 'max': 3818686.45}, 'Holiday_Flag': {'count': 6435.0, 'mean': 0.06993006993006994, 'std': 0.25504894436982795, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'Temperature': {'count': 6435.0, 'mean': 60.66378243978244, 'std': 18.44493287581158, 'min': -2.06, '25%': 47.46, '50%': 62.67, '75%': 74.94, 'max': 100.14}, 'Fuel_Price': {'count': 6435.0, 'mean': 3.358606837606838, 'std': 0.4590197071928525, 'min': 2.472, '25%': 2.933, '50%': 3.445, '75%': 3.735, 'max': 4.468}, 'CPI': {'count': 6435.0, 'mean': 171.57839384878014, 'std': 39.35671229566418, 'min': 126.064, '25%': 131.735, '50%': 182.6165205, '75%': 212.74329345, 'max': 227.2328068}, 'Unemployment': {'count': 6435.0, 'mean': 7.99915104895105, 'std': 1.8758847818627977, 'min': 3.879, '25%': 6.891, '50%': 7.874, '75%': 8.622, 'max': 14.313}} <dataframe_info> RangeIndex: 6435 entries, 0 to 6434 Data columns (total 8 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Store 6435 non-null int64 1 Date 6435 non-null object 2 Weekly_Sales 6435 non-null float64 3 Holiday_Flag 6435 non-null int64 4 Temperature 6435 non-null float64 5 Fuel_Price 6435 non-null float64 6 CPI 6435 non-null float64 7 Unemployment 6435 non-null float64 dtypes: float64(5), int64(2), object(1) memory usage: 402.3+ KB <some_examples> {'Store': {'0': 1, '1': 1, '2': 1, '3': 1}, 'Date': {'0': '05-02-2010', '1': '12-02-2010', '2': '19-02-2010', '3': '26-02-2010'}, 'Weekly_Sales': {'0': 1643690.9, '1': 1641957.44, '2': 1611968.17, '3': 1409727.59}, 'Holiday_Flag': {'0': 0, '1': 1, '2': 0, '3': 0}, 'Temperature': {'0': 42.31, '1': 38.51, '2': 39.93, '3': 46.63}, 'Fuel_Price': {'0': 2.572, '1': 2.548, '2': 2.514, '3': 2.561}, 'CPI': {'0': 211.0963582, '1': 211.2421698, '2': 211.2891429, '3': 211.3196429}, 'Unemployment': {'0': 8.106, '1': 8.106, '2': 8.106, '3': 8.106}} <end_description>
1,047
0
2,373
1,047
69678376
import numpy as np
import pandas as pd

pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)

# Equation for Bayes' theorem
# Marginal Probability: The probability of an event irrespective of the outcomes of other random variables, e.g. P(A).
# Joint Probability: Probability of two (or more) simultaneous events, e.g. P(A and B) or P(A, B).
# Conditional Probability: Probability of one (or more) event given the occurrence of another event, e.g. P(A given B) or P(A | B).
# P(A|B): Posterior probability.
# P(A): Prior probability.
# P(B|A): Likelihood.
# P(B): Evidence.
# Bayes' theorem can therefore be restated as:
# Posterior = Likelihood * Prior / Evidence
# P(A|B) = P(B|A) * P(A) / P(B)
# With two conditioning variables that are conditionally independent given A (the naive Bayes assumption):
# P(A|B,C) is proportional to P(B|A) * P(C|A) * P(A)
# What is success for a clinical trial?
# Definition 1: completing the phase. Success therefore depends on the trial's recruitment and expanded-access status (Available, Completed, Withdrawn, ...). Disease and intervention type are also important, since we want to estimate the probability of success conditional on them.
dataframe = pd.read_csv(
    "../input/disease-groupped-dataframe/disease_groupped_dataframe.csv"
)
data = dataframe.copy()
data[["overall_status", "intervention_type"]].groupby(["intervention_type"]).agg(
    ["count"]
)
data2 = data.copy()
# Map every overall_status value to a binary success flag:
# 1 = Completed / Approved for marketing / Available, 0 = everything else.
status_map = {
    "Unknown status": 0,
    "Terminated": 0,
    "Completed": 1,
    "Recruiting": 0,
    "Not yet recruiting": 0,
    "Withdrawn": 0,
    "Suspended": 0,
    "Approved for marketing": 1,
    "No longer available": 0,
    "Active, not recruiting": 0,
    "Enrolling by invitation": 0,
    "Available": 1,
}
data2["overall_status"] = data2["overall_status"].map(status_map).astype("int")
data2[["overall_status", "intervention_type"]].groupby(["overall_status"]).agg(
    ["count"]
)
# P(overall status | intervention type) = P(status) * P(intervention type | status) / P(intervention type)
# disease and status
data2.sample(3)
data2["phase"] = data2["phase"].fillna(
    "None"
)  # I fill the empty phases with "None" instead of the mode or an imputed value, because some trials simply do not have a phase.
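# For reference, the posterior can also be computed directly from counts:
# P(A|B) = P(A and B) / P(B) = count(A and B) / count(B), which is exactly what
# Bayes' rule above reduces to when the likelihood, prior and evidence are all
# estimated from the same table. A minimal helper sketch (the column/value pairs are
# whatever categorical pair is of interest, e.g. overall_status and intervention_type):
def conditional_probability(frame, a_col, a_val, b_col, b_val):
    n_b = (frame[b_col] == b_val).sum()
    n_ab = ((frame[a_col] == a_val) & (frame[b_col] == b_val)).sum()
    return n_ab / n_b if n_b else float("nan")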
data2["enrollment_type"] = data2["enrollment_type"].fillna( data2["enrollment_type"].mode()[0] ) # enrollment type only has 2 option actual or anticipated, therefore I fill NaN values with mode data2["responsible_party_type"] = data2["responsible_party_type"].fillna( data2["responsible_party_type"].mode()[0] ) # responsible party type has 3 options, sponsor, principal investigator or sponsor-investigator # therefore NaN values are actually missing, so I filled with mode data2["number_of_arms"].fillna(value=data2["number_of_arms"].mean(), inplace=True) data2["enrollment"].fillna(value=data2["enrollment"].median(), inplace=True) data2.columns some_columns = [ "nct_id", "study_type", "overall_status", "phase", "enrollment_type", "intervention_type", "Condition_SHORT", ] data3 = data2[some_columns] data3.sample(3) data3.isnull().sum() # 0 def bayesian_success( daraframe, success_column, success_variable, target_column, target_variable ): p_a = daraframe.loc[(daraframe[success_column] == success_variable)][ "nct_id" ].count() p_a_round = float("{:.4f}".format(p_a)) p_b = daraframe.loc[(daraframe[target_column] == target_variable)]["nct_id"].count() p_b_round = float("{:.4f}".format(p_b)) pb_a = ( daraframe.loc[ (daraframe[success_column] == success_variable) & (daraframe[target_column] == target_variable) ]["nct_id"].count() / daraframe["nct_id"].count() ) pb_a_round = float("{:.4f}".format(pb_a)) pa_b = p_a_round * pb_a_round / p_b_round pa_b_round = float("{:.4f}".format(pa_b)) return pa_b_round bayesian_success(data3, "overall_status", "Completed", "Condition_SHORT", "Respiratory") def bayesian_success( daraframe, success_column, success_variable, target_column, target_variable ): p_a = daraframe.loc[(daraframe[success_column] == success_variable)][ "nct_id" ].count() p_b = daraframe.loc[(daraframe[target_column] == target_variable)]["nct_id"].count() pb_a_df = daraframe.loc[ (daraframe[success_column] == success_variable) & (daraframe[target_column] == target_variable) ] pb_a = pb_a_df["nct_id"].count() / daraframe["nct_id"].count() pa_b = p_a * pb_a / p_b return pa_b def bayesian_success( daraframe, success_column, success_variable, target_column, target_variable ): p_a = daraframe.loc[(daraframe[success_column] == success_variable)][ "nct_id" ].count() p_a_round = float("{:.2f}".format(p_a)) p_b = daraframe.loc[(daraframe[target_column] == target_variable)]["nct_id"].count() p_b_round = float("{:.1f}".format(p_b)) pb_a = ( daraframe.loc[ (daraframe[success_column] == success_variable) & (daraframe[target_column] == target_variable) ]["nct_id"].count() / daraframe["nct_id"].count() ) pb_a_round = float("{:.2f}".format(pb_a)) pa_b = p_a_round * pb_a_round # /p_b pa_b_round = float("{:.2f}".format(pa_b)) return pa_b_round bayesian_success(data3, "overall_status", 1, "study_type", "Interventional") def bayesian_success( daraframe, success_column, success_variable, target_column, target_variable ): p_a = ( daraframe.loc[(daraframe["overall_status"] == 1)]["nct_id"].count() / daraframe["nct_id"].count() ) p_a_round = float("{:.2f}".format(p_a)) p_b = ( daraframe.loc[(daraframe["study_type"] == "Interventional")]["nct_id"].count() / daraframe["nct_id"].count() ) p_b_round = float("{:.1f}".format(p_b)) pb_a = ( daraframe.loc[ (daraframe[success_column] == success_variable) & (daraframe[target_column] == target_variable) ]["nct_id"].count() / daraframe["nct_id"].count() ) pb_a_round = float("{:.2f}".format(pb_a)) pa_b = p_a_round * pb_a_round # /p_b pa_b_round = float("{:.2f}".format(pa_b)) return 
pa_b_round
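The bayesian_success helpers above round intermediate values and mix raw counts with probabilities, so the returned number drifts away from the intended P(success | target). A minimal count-based sketch of the same quantity, assuming a dataframe shaped like data3 from the script; it uses P(A|B) = P(A,B) / P(B), which is what Bayes' rule reduces to when every term is estimated from the same table:

import pandas as pd

def conditional_probability(df, success_column, success_value, target_column, target_value):
    # P(success | target) = P(success and target) / P(target), computed from unrounded counts.
    # Equivalent to P(target | success) * P(success) / P(target) -- Bayes' rule -- when all
    # three terms are estimated from the same dataframe.
    n_target = (df[target_column] == target_value).sum()
    n_joint = (
        (df[success_column] == success_value) & (df[target_column] == target_value)
    ).sum()
    if n_target == 0:
        return float("nan")  # no rows with this target value, so the probability is undefined
    return n_joint / n_target

# Usage mirroring the calls above (data3 is the dataframe built earlier in the script):
# conditional_probability(data3, "overall_status", 1, "study_type", "Interventional")
# conditional_probability(data3, "overall_status", 1, "Condition_SHORT", "Respiratory")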
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/678/69678376.ipynb
null
null
[{"Id": 69678376, "ScriptId": 19010916, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6880870, "CreationDate": "08/02/2021 18:02:48", "VersionNumber": 1.0, "Title": "Bayes Success Rate", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 102.0, "LinesInsertedFromPrevious": 102.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np import pandas as pd pd.set_option("display.max_rows", 500) pd.set_option("display.max_columns", 500) # Equation for Bayes theorem # Marginal Probability: The probability of an event irrespective of the outcomes of other random variables, e.g. P(A). # Joint Probability: Probability of two (or more) simultaneous events, e.g. P(A and B) or P(A, B). # Conditional Probability: Probability of one (or more) event given the occurrence of another event, e.g. P(A given B) or P(A | B). # P(A|B): Posterior probability. # P(A): Prior probability. # P(B|A): Likelihood. # P(B): Evidence. # Bayes Theorem to be restated as: # Posterior = Likelihood * Prior / Evidence # P(A|B) = P(B|A) * P(A) / P(B) # P(A|B,C)= P(B|A)*P(C|A)*P(A) # What is success for a clinical trial? # # Definition 1: Completing the phase. Therefore depends on the trials recruitment and expanded access status(availible, completed, withdrawn...). Also disease and intervetion type are both important since we want to investigate the probability regarding them. dataframe = pd.read_csv( "../input/disease-groupped-dataframe/disease_groupped_dataframe.csv" ) data = dataframe.copy() data[["overall_status", "intervention_type"]].groupby(["intervention_type"]).agg( ["count"] ) data2 = data.copy() status_map = { "Unknown status": 0, "Terminated": 0, "Completed": 1, "Recruiting": 0, "Not yet recruiting": 0, "Withdrawn": 0, "Suspended": 0, "Approved for marketing": 1, "No longer available": 0, "Active, not recruiting": 0, "Enrolling by invitation": 0, "Available": 1, } data2["overall_status"] = data2["overall_status"].map(status_map).astype("int") data2[["overall_status", "intervention_type"]].groupby(["overall_status"]).agg( ["count"] ) # P(overall status|intervention type) = P(status).P(intervention type|overall status)/P(intervention type) # disease and status data2.sample(3) data2["phase"] = data2["phase"].fillna( "None" ) # I fill the empty phases as none, instead filling with mode or imputation. Because some trials may not have a phase. 
data2["enrollment_type"] = data2["enrollment_type"].fillna( data2["enrollment_type"].mode()[0] ) # enrollment type only has 2 option actual or anticipated, therefore I fill NaN values with mode data2["responsible_party_type"] = data2["responsible_party_type"].fillna( data2["responsible_party_type"].mode()[0] ) # responsible party type has 3 options, sponsor, principal investigator or sponsor-investigator # therefore NaN values are actually missing, so I filled with mode data2["number_of_arms"].fillna(value=data2["number_of_arms"].mean(), inplace=True) data2["enrollment"].fillna(value=data2["enrollment"].median(), inplace=True) data2.columns some_columns = [ "nct_id", "study_type", "overall_status", "phase", "enrollment_type", "intervention_type", "Condition_SHORT", ] data3 = data2[some_columns] data3.sample(3) data3.isnull().sum() # 0 def bayesian_success( daraframe, success_column, success_variable, target_column, target_variable ): p_a = daraframe.loc[(daraframe[success_column] == success_variable)][ "nct_id" ].count() p_a_round = float("{:.4f}".format(p_a)) p_b = daraframe.loc[(daraframe[target_column] == target_variable)]["nct_id"].count() p_b_round = float("{:.4f}".format(p_b)) pb_a = ( daraframe.loc[ (daraframe[success_column] == success_variable) & (daraframe[target_column] == target_variable) ]["nct_id"].count() / daraframe["nct_id"].count() ) pb_a_round = float("{:.4f}".format(pb_a)) pa_b = p_a_round * pb_a_round / p_b_round pa_b_round = float("{:.4f}".format(pa_b)) return pa_b_round bayesian_success(data3, "overall_status", "Completed", "Condition_SHORT", "Respiratory") def bayesian_success( daraframe, success_column, success_variable, target_column, target_variable ): p_a = daraframe.loc[(daraframe[success_column] == success_variable)][ "nct_id" ].count() p_b = daraframe.loc[(daraframe[target_column] == target_variable)]["nct_id"].count() pb_a_df = daraframe.loc[ (daraframe[success_column] == success_variable) & (daraframe[target_column] == target_variable) ] pb_a = pb_a_df["nct_id"].count() / daraframe["nct_id"].count() pa_b = p_a * pb_a / p_b return pa_b def bayesian_success( daraframe, success_column, success_variable, target_column, target_variable ): p_a = daraframe.loc[(daraframe[success_column] == success_variable)][ "nct_id" ].count() p_a_round = float("{:.2f}".format(p_a)) p_b = daraframe.loc[(daraframe[target_column] == target_variable)]["nct_id"].count() p_b_round = float("{:.1f}".format(p_b)) pb_a = ( daraframe.loc[ (daraframe[success_column] == success_variable) & (daraframe[target_column] == target_variable) ]["nct_id"].count() / daraframe["nct_id"].count() ) pb_a_round = float("{:.2f}".format(pb_a)) pa_b = p_a_round * pb_a_round # /p_b pa_b_round = float("{:.2f}".format(pa_b)) return pa_b_round bayesian_success(data3, "overall_status", 1, "study_type", "Interventional") def bayesian_success( daraframe, success_column, success_variable, target_column, target_variable ): p_a = ( daraframe.loc[(daraframe["overall_status"] == 1)]["nct_id"].count() / daraframe["nct_id"].count() ) p_a_round = float("{:.2f}".format(p_a)) p_b = ( daraframe.loc[(daraframe["study_type"] == "Interventional")]["nct_id"].count() / daraframe["nct_id"].count() ) p_b_round = float("{:.1f}".format(p_b)) pb_a = ( daraframe.loc[ (daraframe[success_column] == success_variable) & (daraframe[target_column] == target_variable) ]["nct_id"].count() / daraframe["nct_id"].count() ) pb_a_round = float("{:.2f}".format(pb_a)) pa_b = p_a_round * pb_a_round # /p_b pa_b_round = float("{:.2f}".format(pa_b)) return 
pa_b_round
false
0
2,067
0
2,067
2,067
69678170
<jupyter_start><jupyter_text>Campus Recruitment # Hello My name is Ben Roshan D, doing MBA in Business Analytics at Jain University Bangalore . We have practical sessions in Python,R as subjects. Faculties provide us with such data sets to work on with it, So here is one of the data set which our class worked on # What is in it? This data set consists of Placement data of students in a XYZ campus. It includes secondary and higher secondary school percentage and specialization. It also includes degree specialization, type and Work experience and salary offers to the placed students # Acknowledgement I would like to thank Dr. Dhimant Ganatara, Professor Jain University for helping the students by providing this data for us to train R programming # Questions 1. Which factor influenced a candidate in getting placed? 2. Does percentage matters for one to get placed? 3. Which degree specialization is much demanded by corporate? 4. Play with the data conducting all statistical tests. Kaggle dataset identifier: factors-affecting-campus-placement <jupyter_code>import pandas as pd df = pd.read_csv('factors-affecting-campus-placement/Placement_Data_Full_Class.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 215 entries, 0 to 214 Data columns (total 15 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 sl_no 215 non-null int64 1 gender 215 non-null object 2 ssc_p 215 non-null float64 3 ssc_b 215 non-null object 4 hsc_p 215 non-null float64 5 hsc_b 215 non-null object 6 hsc_s 215 non-null object 7 degree_p 215 non-null float64 8 degree_t 215 non-null object 9 workex 215 non-null object 10 etest_p 215 non-null float64 11 specialisation 215 non-null object 12 mba_p 215 non-null float64 13 status 215 non-null object 14 salary 148 non-null float64 dtypes: float64(6), int64(1), object(8) memory usage: 25.3+ KB <jupyter_text>Examples: { "sl_no": 1, "gender": "M", "ssc_p": 67.0, "ssc_b": "Others", "hsc_p": 91.0, "hsc_b": "Others", "hsc_s": "Commerce", "degree_p": 58.0, "degree_t": "Sci&Tech", "workex": "No", "etest_p": 55.0, "specialisation": "Mkt&HR", "mba_p": 58.8, "status": "Placed", "salary": 270000.0 } { "sl_no": 2, "gender": "M", "ssc_p": 79.33, "ssc_b": "Central", "hsc_p": 78.33, "hsc_b": "Others", "hsc_s": "Science", "degree_p": 77.48, "degree_t": "Sci&Tech", "workex": "Yes", "etest_p": 86.5, "specialisation": "Mkt&Fin", "mba_p": 66.28, "status": "Placed", "salary": 200000.0 } { "sl_no": 3, "gender": "M", "ssc_p": 65.0, "ssc_b": "Central", "hsc_p": 68.0, "hsc_b": "Central", "hsc_s": "Arts", "degree_p": 64.0, "degree_t": "Comm&Mgmt", "workex": "No", "etest_p": 75.0, "specialisation": "Mkt&Fin", "mba_p": 57.8, "status": "Placed", "salary": 250000.0 } { "sl_no": 4, "gender": "M", "ssc_p": 56.0, "ssc_b": "Central", "hsc_p": 52.0, "hsc_b": "Central", "hsc_s": "Science", "degree_p": 52.0, "degree_t": "Sci&Tech", "workex": "No", "etest_p": 66.0, "specialisation": "Mkt&HR", "mba_p": 59.43, "status": "Not Placed", "salary": NaN } <jupyter_script>import pandas as pd import numpy as np from matplotlib import pyplot import matplotlib.pyplot as plt from datetime import datetime # To Know the Running Time from sklearn.model_selection import train_test_split # Data Splitting from sklearn import preprocessing from sklearn.preprocessing import StandardScaler # Data Standadization from sklearn.preprocessing import MinMaxScaler # Min-Max Data Normalization from imblearn.over_sampling import ADASYN # Oversampling Data with ADASYN from sklearn.linear_model import 
LogisticRegression # Logistic regression algorithm # Read Dataset df = pd.read_csv( "../input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv" ) # Data Summary df.head() # Drop Kolom Sallary df1 = df.drop(["sl_no", "salary"], axis=1) # Count the Categorical Data cat_cols = df1.select_dtypes(include=object).columns.tolist() ( pd.DataFrame( df[cat_cols].melt(var_name="column", value_name="value").value_counts() ) .rename(columns={0: "counts"}) .sort_values(by=["column", "counts"]) ) # Convert Categorical Data to Integer df1["degree_t"] = ( df1["degree_t"] .replace(["Others"], 1) .replace(["Sci&Tech"], 2) .replace(["Comm&Mgmt"], 3) ) df1["gender"] = df1["gender"].replace(["F"], 1).replace(["M"], 2) df1["hsc_b"] = df1["hsc_b"].replace(["Central"], 1).replace(["Others"], 2) df1["hsc_s"] = ( df1["hsc_s"].replace(["Arts"], 1).replace(["Science"], 2).replace(["Commerce"], 2) ) df1["specialisation"] = ( df1["specialisation"].replace(["Mkt&HR"], 1).replace(["Mkt&Fin"], 2) ) df1["ssc_b"] = df1["ssc_b"].replace(["Others"], 1).replace(["Central"], 2) df1["workex"] = df1["workex"].replace(["Yes"], 1).replace(["No"], 2) df1["status"] = df1["status"].replace(["Not Placed"], 0).replace(["Placed"], 1) df1.head() print(df1.dtypes) # **Classification Model Build Start** # Data Split x = df1.drop("status", axis=1).values y = df1["status"].values x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.2, random_state=None ) # Oversampling Data ada = ADASYN(sampling_strategy="auto", random_state=27) x_train, y_train = ada.fit_resample(x_train, y_train) # Min-Max Data Scalling scaler = MinMaxScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.transform(x_test) # Min-Max Data Scalling scaler = StandardScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.transform(x_test) start = datetime.now() # Logistic Regression lr = LogisticRegression() lr.fit(x_train, y_train) lr_yhat = lr.predict(x_test) end = datetime.now() time_taken = end - start print("Time: ", time_taken) import itertools # advanced tools import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix # Confusion Matrix # Defining the plot function def plot_confusion_matrix(cm, classes, title, normalize=False, cmap=plt.cm.Blues): title = "Confusion Matrix of {}".format(title) if normalize: cm = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis] plt.imshow(cm, interpolation="nearest", cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = ".2f" if normalize else "d" thresh = cm.max() / 2.0 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text( j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black", ) plt.tight_layout() plt.ylabel("True label") plt.xlabel("Predicted label") # Compute confusion matrix for the models lr_matrix = confusion_matrix(y_test, lr_yhat, labels=[0, 1]) # Logistic Regression # Plot the confusion matrix plt.rcParams["figure.figsize"] = (6, 6) # Logistic regression lr_cm_plot = plot_confusion_matrix( lr_matrix, classes=["Negative(0)", "Positive(1)"], normalize=False, title="Logistic Regression", ) plt.savefig("lr_cm_plot.png") plt.show() # get importance importance = lr.coef_[0] feature_names = [ "gender", "ssc_p", "ssc_b", "hsc_p", "hsc_b", "hsc_s", "degree_p", "degree_t", "workex", "etest_p", "specialisation", "mba_p", ] feature_importance = pd.DataFrame(feature_names, 
columns=["feature"]) feature_importance["importance"] = importance feature_importance = feature_importance.sort_values(by=["importance"], ascending=True) from sklearn.linear_model import LogisticRegression ax = feature_importance.plot.barh(x="feature", y="importance") plt.show() # **Most Wanted Specialisation by Industry** # Drop Unnecessary Columns df2 = df.drop(["sl_no", "salary"], axis=1) # Drop the Unemployed df2 = df2[df2.status != "Not Placed"] # Placement rate per specialisation mkthr = round( (df2.specialisation == "Mkt&HR").sum() / (df.specialisation == "Mkt&HR").sum(), 2 ) mktfin = round( (df2.specialisation == "Mkt&Fin").sum() / (df.specialisation == "Mkt&Fin").sum(), 2 ) # Print print("Placement Rate of Each Specialisation") print("Marketing & HR Specialization - Employment Rate {}".format(mkthr)) print("Marketing & Financial - Employment Rate {}".format(mktfin)) # Bar Chart x = ["Marketing & HR", "Marketing & Fin"] energy = [mkthr, mktfin] x_pos = [i for i, _ in enumerate(x)] plt.bar(x_pos, energy) plt.xticks(x_pos, x) plt.show()
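The bar chart above plots the raw logistic-regression coefficients. Because the features were scaled before fitting, exponentiating the coefficients gives odds ratios, which are often easier to read. A short sketch, assuming the fitted lr and the feature_names list defined above are still in scope:

import numpy as np
import pandas as pd

# lr and feature_names are taken from the notebook above; np.exp(beta) is the multiplicative
# change in the odds of being placed per one unit of the scaled feature.
odds_ratios = pd.DataFrame(
    {
        "feature": feature_names,
        "coefficient": lr.coef_[0],
        "odds_ratio": np.exp(lr.coef_[0]),
    }
).sort_values("odds_ratio", ascending=False)
print(odds_ratios)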
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/678/69678170.ipynb
factors-affecting-campus-placement
benroshan
[{"Id": 69678170, "ScriptId": 19037390, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8047660, "CreationDate": "08/02/2021 18:01:01", "VersionNumber": 1.0, "Title": "Campust Employment Factors", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 150.0, "LinesInsertedFromPrevious": 150.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
[{"Id": 93147318, "KernelVersionId": 69678170, "SourceDatasetVersionId": 1073629}]
[{"Id": 1073629, "DatasetId": 596958, "DatasourceVersionId": 1103331, "CreatorUserId": 4683527, "LicenseName": "CC0: Public Domain", "CreationDate": "04/11/2020 11:09:02", "VersionNumber": 1.0, "Title": "Campus Recruitment", "Slug": "factors-affecting-campus-placement", "Subtitle": "Academic and Employability Factors influencing placement", "Description": "# Hello \n\nMy name is Ben Roshan D, doing MBA in Business Analytics at Jain University Bangalore . We have practical sessions in Python,R as subjects. Faculties provide us with such data sets to work on with it, So here is one of the data set which our class worked on\n\n\n# What is in it?\n\nThis data set consists of Placement data of students in a XYZ campus. It includes secondary and higher secondary school percentage and specialization. It also includes degree specialization, type and Work experience and salary offers to the placed students\n\n\n# Acknowledgement\n\nI would like to thank Dr. Dhimant Ganatara, Professor Jain University for helping the students by providing this data for us to train R programming \n\n\n# Questions\n\n1. Which factor influenced a candidate in getting placed?\n2. Does percentage matters for one to get placed?\n3. Which degree specialization is much demanded by corporate?\n4. Play with the data conducting all statistical tests.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 596958, "CreatorUserId": 4683527, "OwnerUserId": 4683527.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1073629.0, "CurrentDatasourceVersionId": 1103331.0, "ForumId": 610915, "Type": 2, "CreationDate": "04/11/2020 11:09:02", "LastActivityDate": "04/11/2020", "TotalViews": 398355, "TotalDownloads": 53059, "TotalVotes": 939, "TotalKernels": 485}]
[{"Id": 4683527, "UserName": "benroshan", "DisplayName": "Ben Roshan", "RegisterDate": "03/17/2020", "PerformanceTier": 2}]
import pandas as pd import numpy as np from matplotlib import pyplot import matplotlib.pyplot as plt from datetime import datetime # To Know the Running Time from sklearn.model_selection import train_test_split # Data Splitting from sklearn import preprocessing from sklearn.preprocessing import StandardScaler # Data Standadization from sklearn.preprocessing import MinMaxScaler # Min-Max Data Normalization from imblearn.over_sampling import ADASYN # Oversampling Data with ADASYN from sklearn.linear_model import LogisticRegression # Logistic regression algorithm # Read Dataset df = pd.read_csv( "../input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv" ) # Data Summary df.head() # Drop Kolom Sallary df1 = df.drop(["sl_no", "salary"], axis=1) # Count the Categorical Data cat_cols = df1.select_dtypes(include=object).columns.tolist() ( pd.DataFrame( df[cat_cols].melt(var_name="column", value_name="value").value_counts() ) .rename(columns={0: "counts"}) .sort_values(by=["column", "counts"]) ) # Convert Categorical Data to Integer df1["degree_t"] = ( df1["degree_t"] .replace(["Others"], 1) .replace(["Sci&Tech"], 2) .replace(["Comm&Mgmt"], 3) ) df1["gender"] = df1["gender"].replace(["F"], 1).replace(["M"], 2) df1["hsc_b"] = df1["hsc_b"].replace(["Central"], 1).replace(["Others"], 2) df1["hsc_s"] = ( df1["hsc_s"].replace(["Arts"], 1).replace(["Science"], 2).replace(["Commerce"], 2) ) df1["specialisation"] = ( df1["specialisation"].replace(["Mkt&HR"], 1).replace(["Mkt&Fin"], 2) ) df1["ssc_b"] = df1["ssc_b"].replace(["Others"], 1).replace(["Central"], 2) df1["workex"] = df1["workex"].replace(["Yes"], 1).replace(["No"], 2) df1["status"] = df1["status"].replace(["Not Placed"], 0).replace(["Placed"], 1) df1.head() print(df1.dtypes) # **Classification Model Build Start** # Data Split x = df1.drop("status", axis=1).values y = df1["status"].values x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.2, random_state=None ) # Oversampling Data ada = ADASYN(sampling_strategy="auto", random_state=27) x_train, y_train = ada.fit_resample(x_train, y_train) # Min-Max Data Scalling scaler = MinMaxScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.transform(x_test) # Min-Max Data Scalling scaler = StandardScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.transform(x_test) start = datetime.now() # Logistic Regression lr = LogisticRegression() lr.fit(x_train, y_train) lr_yhat = lr.predict(x_test) end = datetime.now() time_taken = end - start print("Time: ", time_taken) import itertools # advanced tools import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix # Confusion Matrix # Defining the plot function def plot_confusion_matrix(cm, classes, title, normalize=False, cmap=plt.cm.Blues): title = "Confusion Matrix of {}".format(title) if normalize: cm = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis] plt.imshow(cm, interpolation="nearest", cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = ".2f" if normalize else "d" thresh = cm.max() / 2.0 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text( j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black", ) plt.tight_layout() plt.ylabel("True label") plt.xlabel("Predicted label") # Compute confusion matrix for the models lr_matrix = confusion_matrix(y_test, lr_yhat, labels=[0, 1]) # Logistic Regression # 
Plot the confusion matrix plt.rcParams["figure.figsize"] = (6, 6) # Logistic regression lr_cm_plot = plot_confusion_matrix( lr_matrix, classes=["Negative(0)", "Positive(1)"], normalize=False, title="Logistic Regression", ) plt.savefig("lr_cm_plot.png") plt.show() # get importance importance = lr.coef_[0] feature_names = [ "gender", "ssc_p", "ssc_b", "hsc_p", "hsc_b", "hsc_s", "degree_p", "degree_t", "workex", "etest_p", "specialisation", "mba_p", ] feature_importance = pd.DataFrame(feature_names, columns=["feature"]) feature_importance["importance"] = importance feature_importance = feature_importance.sort_values(by=["importance"], ascending=True) from sklearn.linear_model import LogisticRegression ax = feature_importance.plot.barh(x="feature", y="importance") plt.show() # **Most Wanted Industry by Industry** # Drop Unnecessary Column df2 = df.drop(["sl_no", "salary"], axis=1) # Drop the Unemployed df2 = df2[df2.status != "Not Placed"] # Mathematical Statement mkthr = round( (df2.specialisation == "Mkt&HR").sum() / (df.specialisation == "Mkt&HR").sum(), 2 ) mktfin = round( (df2.specialisation == "Mkt&Fin").sum() / (df.specialisation == "Mkt&Fin").sum(), 2 ) # Print print("Percentage Placed of Each Degree") print("Marketing & HR Specialization - Employment Rate {}".format(mkthr)) print("Marketing & Financial - Employment Rate {}".format(mktfin)) # Bar Chart x = ["Marketing & HR", "Marketing & Fin"] energy = [mkthr, mktfin] x_pos = [i for i, _ in enumerate(x)] plt.bar(x_pos, energy) plt.xticks(x_pos, x) plt.show()
[{"factors-affecting-campus-placement/Placement_Data_Full_Class.csv": {"column_names": "[\"sl_no\", \"gender\", \"ssc_p\", \"ssc_b\", \"hsc_p\", \"hsc_b\", \"hsc_s\", \"degree_p\", \"degree_t\", \"workex\", \"etest_p\", \"specialisation\", \"mba_p\", \"status\", \"salary\"]", "column_data_types": "{\"sl_no\": \"int64\", \"gender\": \"object\", \"ssc_p\": \"float64\", \"ssc_b\": \"object\", \"hsc_p\": \"float64\", \"hsc_b\": \"object\", \"hsc_s\": \"object\", \"degree_p\": \"float64\", \"degree_t\": \"object\", \"workex\": \"object\", \"etest_p\": \"float64\", \"specialisation\": \"object\", \"mba_p\": \"float64\", \"status\": \"object\", \"salary\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 215 entries, 0 to 214\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 sl_no 215 non-null int64 \n 1 gender 215 non-null object \n 2 ssc_p 215 non-null float64\n 3 ssc_b 215 non-null object \n 4 hsc_p 215 non-null float64\n 5 hsc_b 215 non-null object \n 6 hsc_s 215 non-null object \n 7 degree_p 215 non-null float64\n 8 degree_t 215 non-null object \n 9 workex 215 non-null object \n 10 etest_p 215 non-null float64\n 11 specialisation 215 non-null object \n 12 mba_p 215 non-null float64\n 13 status 215 non-null object \n 14 salary 148 non-null float64\ndtypes: float64(6), int64(1), object(8)\nmemory usage: 25.3+ KB\n", "summary": "{\"sl_no\": {\"count\": 215.0, \"mean\": 108.0, \"std\": 62.20932405998316, \"min\": 1.0, \"25%\": 54.5, \"50%\": 108.0, \"75%\": 161.5, \"max\": 215.0}, \"ssc_p\": {\"count\": 215.0, \"mean\": 67.30339534883721, \"std\": 10.827205398231452, \"min\": 40.89, \"25%\": 60.599999999999994, \"50%\": 67.0, \"75%\": 75.7, \"max\": 89.4}, \"hsc_p\": {\"count\": 215.0, \"mean\": 66.33316279069768, \"std\": 10.89750915750298, \"min\": 37.0, \"25%\": 60.9, \"50%\": 65.0, \"75%\": 73.0, \"max\": 97.7}, \"degree_p\": {\"count\": 215.0, \"mean\": 66.37018604651163, \"std\": 7.35874328733944, \"min\": 50.0, \"25%\": 61.0, \"50%\": 66.0, \"75%\": 72.0, \"max\": 91.0}, \"etest_p\": {\"count\": 215.0, \"mean\": 72.10055813953488, \"std\": 13.275956401653833, \"min\": 50.0, \"25%\": 60.0, \"50%\": 71.0, \"75%\": 83.5, \"max\": 98.0}, \"mba_p\": {\"count\": 215.0, \"mean\": 62.278186046511635, \"std\": 5.833384580683801, \"min\": 51.21, \"25%\": 57.945, \"50%\": 62.0, \"75%\": 66.255, \"max\": 77.89}, \"salary\": {\"count\": 148.0, \"mean\": 288655.4054054054, \"std\": 93457.45241958875, \"min\": 200000.0, \"25%\": 240000.0, \"50%\": 265000.0, \"75%\": 300000.0, \"max\": 940000.0}}", "examples": 
"{\"sl_no\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"gender\":{\"0\":\"M\",\"1\":\"M\",\"2\":\"M\",\"3\":\"M\"},\"ssc_p\":{\"0\":67.0,\"1\":79.33,\"2\":65.0,\"3\":56.0},\"ssc_b\":{\"0\":\"Others\",\"1\":\"Central\",\"2\":\"Central\",\"3\":\"Central\"},\"hsc_p\":{\"0\":91.0,\"1\":78.33,\"2\":68.0,\"3\":52.0},\"hsc_b\":{\"0\":\"Others\",\"1\":\"Others\",\"2\":\"Central\",\"3\":\"Central\"},\"hsc_s\":{\"0\":\"Commerce\",\"1\":\"Science\",\"2\":\"Arts\",\"3\":\"Science\"},\"degree_p\":{\"0\":58.0,\"1\":77.48,\"2\":64.0,\"3\":52.0},\"degree_t\":{\"0\":\"Sci&Tech\",\"1\":\"Sci&Tech\",\"2\":\"Comm&Mgmt\",\"3\":\"Sci&Tech\"},\"workex\":{\"0\":\"No\",\"1\":\"Yes\",\"2\":\"No\",\"3\":\"No\"},\"etest_p\":{\"0\":55.0,\"1\":86.5,\"2\":75.0,\"3\":66.0},\"specialisation\":{\"0\":\"Mkt&HR\",\"1\":\"Mkt&Fin\",\"2\":\"Mkt&Fin\",\"3\":\"Mkt&HR\"},\"mba_p\":{\"0\":58.8,\"1\":66.28,\"2\":57.8,\"3\":59.43},\"status\":{\"0\":\"Placed\",\"1\":\"Placed\",\"2\":\"Placed\",\"3\":\"Not Placed\"},\"salary\":{\"0\":270000.0,\"1\":200000.0,\"2\":250000.0,\"3\":null}}"}}]
true
1
<start_data_description><data_path>factors-affecting-campus-placement/Placement_Data_Full_Class.csv: <column_names> ['sl_no', 'gender', 'ssc_p', 'ssc_b', 'hsc_p', 'hsc_b', 'hsc_s', 'degree_p', 'degree_t', 'workex', 'etest_p', 'specialisation', 'mba_p', 'status', 'salary'] <column_types> {'sl_no': 'int64', 'gender': 'object', 'ssc_p': 'float64', 'ssc_b': 'object', 'hsc_p': 'float64', 'hsc_b': 'object', 'hsc_s': 'object', 'degree_p': 'float64', 'degree_t': 'object', 'workex': 'object', 'etest_p': 'float64', 'specialisation': 'object', 'mba_p': 'float64', 'status': 'object', 'salary': 'float64'} <dataframe_Summary> {'sl_no': {'count': 215.0, 'mean': 108.0, 'std': 62.20932405998316, 'min': 1.0, '25%': 54.5, '50%': 108.0, '75%': 161.5, 'max': 215.0}, 'ssc_p': {'count': 215.0, 'mean': 67.30339534883721, 'std': 10.827205398231452, 'min': 40.89, '25%': 60.599999999999994, '50%': 67.0, '75%': 75.7, 'max': 89.4}, 'hsc_p': {'count': 215.0, 'mean': 66.33316279069768, 'std': 10.89750915750298, 'min': 37.0, '25%': 60.9, '50%': 65.0, '75%': 73.0, 'max': 97.7}, 'degree_p': {'count': 215.0, 'mean': 66.37018604651163, 'std': 7.35874328733944, 'min': 50.0, '25%': 61.0, '50%': 66.0, '75%': 72.0, 'max': 91.0}, 'etest_p': {'count': 215.0, 'mean': 72.10055813953488, 'std': 13.275956401653833, 'min': 50.0, '25%': 60.0, '50%': 71.0, '75%': 83.5, 'max': 98.0}, 'mba_p': {'count': 215.0, 'mean': 62.278186046511635, 'std': 5.833384580683801, 'min': 51.21, '25%': 57.945, '50%': 62.0, '75%': 66.255, 'max': 77.89}, 'salary': {'count': 148.0, 'mean': 288655.4054054054, 'std': 93457.45241958875, 'min': 200000.0, '25%': 240000.0, '50%': 265000.0, '75%': 300000.0, 'max': 940000.0}} <dataframe_info> RangeIndex: 215 entries, 0 to 214 Data columns (total 15 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 sl_no 215 non-null int64 1 gender 215 non-null object 2 ssc_p 215 non-null float64 3 ssc_b 215 non-null object 4 hsc_p 215 non-null float64 5 hsc_b 215 non-null object 6 hsc_s 215 non-null object 7 degree_p 215 non-null float64 8 degree_t 215 non-null object 9 workex 215 non-null object 10 etest_p 215 non-null float64 11 specialisation 215 non-null object 12 mba_p 215 non-null float64 13 status 215 non-null object 14 salary 148 non-null float64 dtypes: float64(6), int64(1), object(8) memory usage: 25.3+ KB <some_examples> {'sl_no': {'0': 1, '1': 2, '2': 3, '3': 4}, 'gender': {'0': 'M', '1': 'M', '2': 'M', '3': 'M'}, 'ssc_p': {'0': 67.0, '1': 79.33, '2': 65.0, '3': 56.0}, 'ssc_b': {'0': 'Others', '1': 'Central', '2': 'Central', '3': 'Central'}, 'hsc_p': {'0': 91.0, '1': 78.33, '2': 68.0, '3': 52.0}, 'hsc_b': {'0': 'Others', '1': 'Others', '2': 'Central', '3': 'Central'}, 'hsc_s': {'0': 'Commerce', '1': 'Science', '2': 'Arts', '3': 'Science'}, 'degree_p': {'0': 58.0, '1': 77.48, '2': 64.0, '3': 52.0}, 'degree_t': {'0': 'Sci&Tech', '1': 'Sci&Tech', '2': 'Comm&Mgmt', '3': 'Sci&Tech'}, 'workex': {'0': 'No', '1': 'Yes', '2': 'No', '3': 'No'}, 'etest_p': {'0': 55.0, '1': 86.5, '2': 75.0, '3': 66.0}, 'specialisation': {'0': 'Mkt&HR', '1': 'Mkt&Fin', '2': 'Mkt&Fin', '3': 'Mkt&HR'}, 'mba_p': {'0': 58.8, '1': 66.28, '2': 57.8, '3': 59.43}, 'status': {'0': 'Placed', '1': 'Placed', '2': 'Placed', '3': 'Not Placed'}, 'salary': {'0': 270000.0, '1': 200000.0, '2': 250000.0, '3': None}} <end_description>
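The description above notes that salary is populated only for 148 of the 215 rows (the placed students). A small follow-up sketch, assuming only the file path and column names from the description, summarising salary by MBA specialisation:

import pandas as pd

df = pd.read_csv("factors-affecting-campus-placement/Placement_Data_Full_Class.csv")

# salary is NaN for "Not Placed" rows, so restrict to placed students before summarising.
placed = df[df["status"] == "Placed"]
print(placed.groupby("specialisation")["salary"].agg(["count", "mean", "median", "max"]))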
1,777
1
3,070
1,777
69678546
<jupyter_start><jupyter_text>Ukrainian Open Speech To Text Dataset 4.2 part 2 Type: Dataset Tags: Ukrainian Open Speech To Text Dataset STT Abstract: Speech Recognition for Ukrainian 🇺🇦 The aim of this repository is to collect information and datasets for speech recognition in Ukrainian. Get in touch with us in our Telegram group: https://t.me/speech_recognition_uk Datasets Compiled dataset from different open sources + Companies + Community = 188.31GB / ~1200 hours 💪 License: No license specified, the work may be protected by copyright. https://academictorrents.com/details/fcf8bb60c59e9eb583df003d54ed61776650beb8 Kaggle dataset identifier: ukrainian-open-speech-to-text-dataset-42-part-2 <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import nltk import re from sklearn.model_selection import train_test_split import os import pathlib import matplotlib.pyplot as plt import numpy as np import seaborn as sns import tensorflow as tf from tensorflow.keras.layers.experimental import preprocessing from tensorflow.keras import layers from tensorflow.keras import models from IPython import display from tensorflow.compat.v1 import ConfigProto from tensorflow.compat.v1 import InteractiveSession from collections import Counter config = ConfigProto() config.gpu_options.allow_growth = True session = InteractiveSession(config=config) # Based on this example: https://www.tensorflow.org/tutorials/audio/simple_audio data = pd.read_csv( "../input/ukrainian-ostotextdataset42-dataframe/Ukrainian_Open_Speech_To_Text Dataset.csv" ) data["number_of_words"] = data["text"].apply(lambda x: len(str(x).split())) data["words"] = data["text"].apply(lambda x: str(x).split()) print(data) pd.set_option("display.max_columns", None) pd.set_option("display.max_colwidth", -1) print(data.shape) print(data.loc[data["number_of_words"] == 1]) datasets = data["dataset"].unique() data = data.dropna() # .fillna(value='', inplace=True) # data=data.loc[data['dataset'] == 'VR/'] data = data.loc[data["dataset"] != "1TVUKRAINIAN/"] data = data.loc[data["dataset"] != "GROSHI/"] data = data.loc[data["dataset"] != "MON/"] data = data.loc[data["number_of_words"] == 1] data = data[["path", "text"]] data["text"] = data["text"].str.replace(",", "-") print("kaggle datasets:", datasets) commands = data["text"].unique() print("commands:", len(commands)) # data=data.dropna()#.fillna(value='', inplace=True) print(len(data)) val_size = 0.2 # 0.2 test_size = 0.1 val_size = round(val_size * len(data)) test_size = round(test_size * len(data)) train_files_pd = data[: len(data) - val_size - test_size] val_files_pd = data[len(data) - val_size - test_size : len(data) - test_size] test_files_pd = data[len(data) - test_size :] train_files = train_files_pd.to_numpy() val_files = val_files_pd.to_numpy() test_files = test_files_pd.to_numpy() print("Training set size", len(train_files)) print("Validation set size", len(val_files)) print("Test set size", len(test_files)) def get_label(file_path): parts = tf.strings.split(file_path, os.path.sep) # Note: You'll use indexing here instead of tuple unpacking to enable this # to work in a TensorFlow graph. 
return parts[-2] def decode_audio(audio_binary): audio, _ = tf.audio.decode_wav(audio_binary) return tf.squeeze(audio, axis=-1) def get_waveform_and_label(file_path): label = file_path[1] audio_binary = tf.io.read_file(file_path[0]) print(audio_binary) waveform = decode_audio(audio_binary) return waveform, label AUTOTUNE = tf.data.AUTOTUNE files_ds = tf.data.Dataset.from_tensor_slices(train_files) print(files_ds) waveform_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE) print(files_ds) rows = 3 cols = 3 n = rows * cols fig, axes = plt.subplots(rows, cols, figsize=(10, 12)) for i, (audio, label) in enumerate(waveform_ds.take(n)): r = i // cols c = i % cols ax = axes[r][c] ax.plot(audio.numpy()) ax.set_yticks(np.arange(-1.2, 1.2, 0.2)) label = label.numpy().decode("utf-8") ax.set_title(label) plt.show() def get_spectrogram(waveform): # Padding for files with less than 16000 samples # if waveform.get_shape().as_list()[0]: print(([50000] - tf.shape(waveform)) < 0) if ([50000] - tf.shape(waveform)) < 0: waveform = tf.slice(waveform, [0], [50000]) zero_padding = tf.zeros([50000] - tf.shape(waveform), dtype=tf.float32) # print(waveform.get_shape().as_list()[0]) # print(tf.slice(waveform, [0], [100000], name=None # Concatenate audio with padding so that all audio clips will be of the # same length waveform = tf.cast(waveform, tf.float32) equal_length = tf.concat([waveform, zero_padding], 0) spectrogram = tf.signal.stft(equal_length, frame_length=255, frame_step=128) spectrogram = tf.abs(spectrogram) return spectrogram for waveform, label in waveform_ds.take(1): label = label.numpy().decode("utf-8") spectrogram = get_spectrogram(waveform) print("Label:", label) print("Waveform shape:", waveform.shape) print("Spectrogram shape:", spectrogram.shape) print("Audio playback") display.display(display.Audio(waveform, rate=16000)) def plot_spectrogram(spectrogram, ax): # Convert to frequencies to log scale and transpose so that the time is # represented in the x-axis (columns). 
log_spec = np.log(spectrogram.T) height = log_spec.shape[0] width = log_spec.shape[1] X = np.linspace(0, np.size(spectrogram), num=width, dtype=int) Y = range(height) ax.pcolormesh(X, Y, log_spec) fig, axes = plt.subplots(2, figsize=(12, 8)) timescale = np.arange(waveform.shape[0]) axes[0].plot(timescale, waveform.numpy()) axes[0].set_title("Waveform") axes[0].set_xlim([0, 16000]) plot_spectrogram(spectrogram.numpy(), axes[1]) axes[1].set_title("Spectrogram") plt.show() def get_spectrogram_and_label_id(audio, label): spectrogram = get_spectrogram(audio) spectrogram = tf.expand_dims(spectrogram, -1) label_id = tf.argmax(label == commands) return spectrogram, label_id spectrogram_ds = waveform_ds.map( get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE ) rows = 3 cols = 3 n = rows * cols fig, axes = plt.subplots(rows, cols, figsize=(10, 10)) for i, (spectrogram, label_id) in enumerate(spectrogram_ds.take(n)): r = i // cols c = i % cols ax = axes[r][c] plot_spectrogram(np.squeeze(spectrogram.numpy()), ax) ax.set_title(commands[label_id.numpy()]) ax.axis("off") plt.show() def preprocess_dataset(files): files_ds = tf.data.Dataset.from_tensor_slices(files) output_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE) output_ds = output_ds.map(get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE) return output_ds train_ds = spectrogram_ds val_ds = preprocess_dataset(val_files) test_ds = preprocess_dataset(test_files) batch_size = 128 train_ds = train_ds.batch(batch_size) val_ds = val_ds.batch(batch_size) for spectrogram, _ in spectrogram_ds.take(1): input_shape = spectrogram.shape print("Input shape:", input_shape) num_labels = len(commands) print("num_labels:", num_labels) norm_layer = preprocessing.Normalization() # norm_layer.adapt(spectrogram_ds.map(lambda x, _: x)) model = models.Sequential( [ layers.Input(shape=input_shape), preprocessing.Resizing(96, 96), norm_layer, layers.Conv2D(512, 3, activation="relu"), layers.MaxPooling2D(), layers.Dropout(0.25), layers.Conv2D(256, 3, activation="relu"), layers.MaxPooling2D(), layers.Dropout(0.25), layers.Flatten(), layers.Dense(256, activation="relu"), layers.Dropout(0.5), layers.Dense(num_labels), ] ) model.summary() model.compile( optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"], ) EPOCHS = 20 history = model.fit( train_ds, # steps_per_epoch =20, validation_data=val_ds, epochs=EPOCHS, # callbacks=tf.keras.callbacks.EarlyStopping(verbose=1, patience=2), ) metrics = history.history plt.plot(history.epoch, metrics["loss"], metrics["val_loss"]) plt.legend(["loss", "val_loss"]) plt.show() test_audio = [] test_labels = [] for audio, label in test_ds: test_audio.append(audio.numpy()) test_labels.append(label.numpy()) test_audio = np.array(test_audio) test_labels = np.array(test_labels) y_pred = np.argmax(model.predict(test_audio), axis=1) y_true = test_labels test_acc = sum(y_pred == y_true) / len(y_true) print(f"Test set accuracy: {test_acc:.0%}")
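The get_spectrogram function above pads or truncates every waveform to 50,000 samples and then applies tf.signal.stft with frame_length=255 and frame_step=128. A standalone sanity check on a synthetic tone (the 440 Hz sine and 16 kHz rate are illustrative assumptions, not values taken from the dataset):

import numpy as np
import tensorflow as tf

samples = 50000  # the fixed length used by get_spectrogram above
t = np.arange(samples) / 16000.0
waveform = tf.constant(np.sin(2.0 * np.pi * 440.0 * t), dtype=tf.float32)  # synthetic 440 Hz tone

stft = tf.signal.stft(waveform, frame_length=255, frame_step=128)
spectrogram = tf.abs(stft)

# With pad_end=False (the default): 1 + (50000 - 255) // 128 = 389 frames,
# and fft_length defaults to 256, giving 256 // 2 + 1 = 129 frequency bins.
print(spectrogram.shape)  # expected: (389, 129)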
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/678/69678546.ipynb
ukrainian-open-speech-to-text-dataset-42-part-2
aikhmelnytskyy
[{"Id": 69678546, "ScriptId": 18919110, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2626211, "CreationDate": "08/02/2021 18:04:05", "VersionNumber": 5.0, "Title": "first model", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 266.0, "LinesInsertedFromPrevious": 12.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 254.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 7}]
[{"Id": 93147813, "KernelVersionId": 69678546, "SourceDatasetVersionId": 2407725}, {"Id": 93147812, "KernelVersionId": 69678546, "SourceDatasetVersionId": 2402538}, {"Id": 93147815, "KernelVersionId": 69678546, "SourceDatasetVersionId": 2474120}, {"Id": 93147814, "KernelVersionId": 69678546, "SourceDatasetVersionId": 2408730}]
[{"Id": 2407725, "DatasetId": 1453037, "DatasourceVersionId": 2449799, "CreatorUserId": 2626211, "LicenseName": "Unknown", "CreationDate": "07/08/2021 20:13:16", "VersionNumber": 4.0, "Title": "Ukrainian Open Speech To Text Dataset 4.2 part 2", "Slug": "ukrainian-open-speech-to-text-dataset-42-part-2", "Subtitle": NaN, "Description": "Type: Dataset\nTags: Ukrainian Open Speech To Text Dataset STT\nAbstract:\n\nSpeech Recognition for Ukrainian \ud83c\uddfa\ud83c\udde6 The aim of this repository is to collect information and datasets for speech recognition in Ukrainian.\n\nGet in touch with us in our Telegram group: https://t.me/speech_recognition_uk\n\nDatasets Compiled dataset from different open sources + Companies + Community = 188.31GB / ~1200 hours \ud83d\udcaa\n\nLicense: No license specified, the work may be protected by copyright.\nhttps://academictorrents.com/details/fcf8bb60c59e9eb583df003d54ed61776650beb8", "VersionNotes": "v4", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1453037, "CreatorUserId": 2626211, "OwnerUserId": 2626211.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2407725.0, "CurrentDatasourceVersionId": 2449799.0, "ForumId": 1472596, "Type": 2, "CreationDate": "07/07/2021 09:08:30", "LastActivityDate": "07/07/2021", "TotalViews": 1974, "TotalDownloads": 33, "TotalVotes": 5, "TotalKernels": 1}]
[{"Id": 2626211, "UserName": "aikhmelnytskyy", "DisplayName": "Andrij", "RegisterDate": "12/18/2018", "PerformanceTier": 3}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import nltk import re from sklearn.model_selection import train_test_split import os import pathlib import matplotlib.pyplot as plt import numpy as np import seaborn as sns import tensorflow as tf from tensorflow.keras.layers.experimental import preprocessing from tensorflow.keras import layers from tensorflow.keras import models from IPython import display from tensorflow.compat.v1 import ConfigProto from tensorflow.compat.v1 import InteractiveSession from collections import Counter config = ConfigProto() config.gpu_options.allow_growth = True session = InteractiveSession(config=config) # Based on this example: https://www.tensorflow.org/tutorials/audio/simple_audio data = pd.read_csv( "../input/ukrainian-ostotextdataset42-dataframe/Ukrainian_Open_Speech_To_Text Dataset.csv" ) data["number_of_words"] = data["text"].apply(lambda x: len(str(x).split())) data["words"] = data["text"].apply(lambda x: str(x).split()) print(data) pd.set_option("display.max_columns", None) pd.set_option("display.max_colwidth", -1) print(data.shape) print(data.loc[data["number_of_words"] == 1]) datasets = data["dataset"].unique() data = data.dropna() # .fillna(value='', inplace=True) # data=data.loc[data['dataset'] == 'VR/'] data = data.loc[data["dataset"] != "1TVUKRAINIAN/"] data = data.loc[data["dataset"] != "GROSHI/"] data = data.loc[data["dataset"] != "MON/"] data = data.loc[data["number_of_words"] == 1] data = data[["path", "text"]] data["text"] = data["text"].str.replace(",", "-") print("kaggle datasets:", datasets) commands = data["text"].unique() print("commands:", len(commands)) # data=data.dropna()#.fillna(value='', inplace=True) print(len(data)) val_size = 0.2 # 0.2 test_size = 0.1 val_size = round(val_size * len(data)) test_size = round(test_size * len(data)) train_files_pd = data[: len(data) - val_size - test_size] val_files_pd = data[len(data) - val_size - test_size : len(data) - test_size] test_files_pd = data[len(data) - test_size :] train_files = train_files_pd.to_numpy() val_files = val_files_pd.to_numpy() test_files = test_files_pd.to_numpy() print("Training set size", len(train_files)) print("Validation set size", len(val_files)) print("Test set size", len(test_files)) def get_label(file_path): parts = tf.strings.split(file_path, os.path.sep) # Note: You'll use indexing here instead of tuple unpacking to enable this # to work in a TensorFlow graph. 
return parts[-2] def decode_audio(audio_binary): audio, _ = tf.audio.decode_wav(audio_binary) return tf.squeeze(audio, axis=-1) def get_waveform_and_label(file_path): label = file_path[1] audio_binary = tf.io.read_file(file_path[0]) print(audio_binary) waveform = decode_audio(audio_binary) return waveform, label AUTOTUNE = tf.data.AUTOTUNE files_ds = tf.data.Dataset.from_tensor_slices(train_files) print(files_ds) waveform_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE) print(files_ds) rows = 3 cols = 3 n = rows * cols fig, axes = plt.subplots(rows, cols, figsize=(10, 12)) for i, (audio, label) in enumerate(waveform_ds.take(n)): r = i // cols c = i % cols ax = axes[r][c] ax.plot(audio.numpy()) ax.set_yticks(np.arange(-1.2, 1.2, 0.2)) label = label.numpy().decode("utf-8") ax.set_title(label) plt.show() def get_spectrogram(waveform): # Padding for files with less than 16000 samples # if waveform.get_shape().as_list()[0]: print(([50000] - tf.shape(waveform)) < 0) if ([50000] - tf.shape(waveform)) < 0: waveform = tf.slice(waveform, [0], [50000]) zero_padding = tf.zeros([50000] - tf.shape(waveform), dtype=tf.float32) # print(waveform.get_shape().as_list()[0]) # print(tf.slice(waveform, [0], [100000], name=None # Concatenate audio with padding so that all audio clips will be of the # same length waveform = tf.cast(waveform, tf.float32) equal_length = tf.concat([waveform, zero_padding], 0) spectrogram = tf.signal.stft(equal_length, frame_length=255, frame_step=128) spectrogram = tf.abs(spectrogram) return spectrogram for waveform, label in waveform_ds.take(1): label = label.numpy().decode("utf-8") spectrogram = get_spectrogram(waveform) print("Label:", label) print("Waveform shape:", waveform.shape) print("Spectrogram shape:", spectrogram.shape) print("Audio playback") display.display(display.Audio(waveform, rate=16000)) def plot_spectrogram(spectrogram, ax): # Convert to frequencies to log scale and transpose so that the time is # represented in the x-axis (columns). 
log_spec = np.log(spectrogram.T) height = log_spec.shape[0] width = log_spec.shape[1] X = np.linspace(0, np.size(spectrogram), num=width, dtype=int) Y = range(height) ax.pcolormesh(X, Y, log_spec) fig, axes = plt.subplots(2, figsize=(12, 8)) timescale = np.arange(waveform.shape[0]) axes[0].plot(timescale, waveform.numpy()) axes[0].set_title("Waveform") axes[0].set_xlim([0, 16000]) plot_spectrogram(spectrogram.numpy(), axes[1]) axes[1].set_title("Spectrogram") plt.show() def get_spectrogram_and_label_id(audio, label): spectrogram = get_spectrogram(audio) spectrogram = tf.expand_dims(spectrogram, -1) label_id = tf.argmax(label == commands) return spectrogram, label_id spectrogram_ds = waveform_ds.map( get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE ) rows = 3 cols = 3 n = rows * cols fig, axes = plt.subplots(rows, cols, figsize=(10, 10)) for i, (spectrogram, label_id) in enumerate(spectrogram_ds.take(n)): r = i // cols c = i % cols ax = axes[r][c] plot_spectrogram(np.squeeze(spectrogram.numpy()), ax) ax.set_title(commands[label_id.numpy()]) ax.axis("off") plt.show() def preprocess_dataset(files): files_ds = tf.data.Dataset.from_tensor_slices(files) output_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE) output_ds = output_ds.map(get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE) return output_ds train_ds = spectrogram_ds val_ds = preprocess_dataset(val_files) test_ds = preprocess_dataset(test_files) batch_size = 128 train_ds = train_ds.batch(batch_size) val_ds = val_ds.batch(batch_size) for spectrogram, _ in spectrogram_ds.take(1): input_shape = spectrogram.shape print("Input shape:", input_shape) num_labels = len(commands) print("num_labels:", num_labels) norm_layer = preprocessing.Normalization() # norm_layer.adapt(spectrogram_ds.map(lambda x, _: x)) model = models.Sequential( [ layers.Input(shape=input_shape), preprocessing.Resizing(96, 96), norm_layer, layers.Conv2D(512, 3, activation="relu"), layers.MaxPooling2D(), layers.Dropout(0.25), layers.Conv2D(256, 3, activation="relu"), layers.MaxPooling2D(), layers.Dropout(0.25), layers.Flatten(), layers.Dense(256, activation="relu"), layers.Dropout(0.5), layers.Dense(num_labels), ] ) model.summary() model.compile( optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"], ) EPOCHS = 20 history = model.fit( train_ds, # steps_per_epoch =20, validation_data=val_ds, epochs=EPOCHS, # callbacks=tf.keras.callbacks.EarlyStopping(verbose=1, patience=2), ) metrics = history.history plt.plot(history.epoch, metrics["loss"], metrics["val_loss"]) plt.legend(["loss", "val_loss"]) plt.show() test_audio = [] test_labels = [] for audio, label in test_ds: test_audio.append(audio.numpy()) test_labels.append(label.numpy()) test_audio = np.array(test_audio) test_labels = np.array(test_labels) y_pred = np.argmax(model.predict(test_audio), axis=1) y_true = test_labels test_acc = sum(y_pred == y_true) / len(y_true) print(f"Test set accuracy: {test_acc:.0%}")
false
1
2,690
7
2,913
2,690
69869056
import nltk sentence = """The Natural Language Toolkit, or more commonly NLTK, is a suite of libraries and programs for symbolic and statistical natural language processing for English written in the Python programming language.""" tokens = nltk.word_tokenize(sentence) print(tokens) tagged = nltk.pos_tag(tokens) print(tagged) entities = nltk.chunk.ne_chunk(tagged) print(entities)
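nltk.chunk.ne_chunk returns an nltk Tree that mixes plain (word, tag) pairs with labelled entity subtrees. A small sketch that pulls the labelled chunks out of the entities tree printed above (the exact labels depend on NLTK's pretrained named-entity chunker):

# Extract (label, phrase) pairs from the ne_chunk result above.
named_entities = []
for chunk in entities:
    if hasattr(chunk, "label"):  # entity subtrees have a label; plain (word, tag) tuples do not
        phrase = " ".join(word for word, tag in chunk.leaves())
        named_entities.append((chunk.label(), phrase))

print(named_entities)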
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/869/69869056.ipynb
null
null
[{"Id": 69869056, "ScriptId": 19099695, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5240195, "CreationDate": "08/03/2021 17:06:10", "VersionNumber": 1.0, "Title": "NLTK Practice", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 12.0, "LinesInsertedFromPrevious": 12.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
import nltk sentence = """The Natural Language Toolkit, or more commonly NLTK, is a suite of libraries and programs for symbolic and statistical natural language processing for English written in the Python programming language.""" tokens = nltk.word_tokenize(sentence) print(tokens) tagged = nltk.pos_tag(tokens) print(tagged) entities = nltk.chunk.ne_chunk(tagged) print(entities)
false
0
92
1
92
92
69869700
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re from sklearn.model_selection import train_test_split from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics import plot_confusion_matrix, classification_report # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv") test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv") # print(train['keyword'].unique()) # print(train['location'].unique()) train = train.drop(["keyword", "location"], axis=1) test = test.drop(["keyword", "location"], axis=1) train = train.drop("id", axis=1) test = test.drop("id", axis=1) # remove special char and numbers def specialChar(data): data = [re.sub(r"[^a-zA-Z]", "", string) for string in data.split()] return " ".join(data) # Removed hhtps like words def removeHttps(data): data = data.split() for i in range(len(data)): if "htt" in data[i]: data[i] = "" return " ".join(data) # capital letters and small letters conversion def lowerCase(data): return data.lower() def removeExtraSpace(data): return re.sub(" +", " ", data) train["text"] = train["text"].apply(lambda data: specialChar(data)) train["text"] = train["text"].apply(lambda data: removeHttps(data)) train["text"] = train["text"].apply(lambda x: lowerCase(x)) train["text"] = train["text"].apply(lambda x: removeExtraSpace(x)) test["text"] = test["text"].apply(lambda data: specialChar(data)) test["text"] = test["text"].apply(lambda data: removeHttps(data)) test["text"] = test["text"].apply(lambda x: lowerCase(x)) test["text"] = test["text"].apply(lambda x: removeExtraSpace(x)) # # Base Model Comparison X_train, X_test, y_train, y_test = train_test_split( train["text"], train["target"], test_size=0.30, random_state=42 ) tf_idf = TfidfVectorizer( stop_words="english", ) train_transformed = tf_idf.fit_transform(X_train) test_transformed = tf_idf.transform(X_test) # Base Models Nb_model = MultinomialNB() linear_model = LogisticRegression() random_forest = RandomForestClassifier() def predict(model, X_train, X_test, y_train, y_test): model.fit(X_train, y_train) y_pred = model.predict(X_test) print(plot_confusion_matrix(model, X_test, y_test)) print(classification_report(y_test, y_pred)) print("Naive Baye's : ") predict(Nb_model, train_transformed, test_transformed, y_train, y_test) print("Logistic Regression : ") predict(linear_model, train_transformed, test_transformed, y_train, y_test) print("Support Vector classifier : ") support_vector = SVC( C=3, kernel="linear", degree=3, gamma="auto", ) predict(support_vector, train_transformed, test_transformed, y_train, y_test) ada = AdaBoostClassifier() predict(ada, train_transformed, 
test_transformed, y_train, y_test) Grad = GradientBoostingClassifier() predict(Grad, train_transformed, test_transformed, y_train, y_test) # # Final Model And Submission # Naive Baye's = 0.79 and Logistic train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv") train = train.drop(["keyword", "location"], axis=1) train = train.drop("id", axis=1) train["text"] = train["text"].apply(lambda data: specialChar(data)) train["text"] = train["text"].apply(lambda data: removeHttps(data)) train["text"] = train["text"].apply(lambda x: lowerCase(x)) train["text"] = train["text"].apply(lambda x: removeExtraSpace(x)) """ tf_idf = TfidfVectorizer(stop_words='english') train_transformed = tf_idf.fit_transform(train['text']) test_transformed = tf_idf.transform(test['text']) model = MultinomialNB() model.fit(train_transformed,train['target']) y_pred = model.predict(test_transformed)""" tf_idf = TfidfVectorizer(stop_words="english") train_transformed = tf_idf.fit_transform(train["text"]) test_transformed = tf_idf.transform(test["text"]) model = LogisticRegression() model.fit(train_transformed, train["target"]) y_pred = model.predict(test_transformed) y_pred sample_submission = pd.read_csv( "/kaggle/input/nlp-getting-started/sample_submission.csv" ) sample_submission["target"] = y_pred sample_submission.head() sample_submission.to_csv("submission.csv", index=False)
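The notebook above fits the TF-IDF vectoriser and each classifier by hand. A minimal alternative sketch, assuming the cleaned train dataframe from above, that wraps the vectoriser and model in a single sklearn Pipeline and cross-validates it, so the vocabulary is refit inside each fold:

from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

# `train` is assumed to be the cleaned dataframe built above (columns include text and target).
clf = Pipeline(
    [
        ("tfidf", TfidfVectorizer(stop_words="english")),
        ("logreg", LogisticRegression(max_iter=1000)),
    ]
)

scores = cross_val_score(clf, train["text"], train["target"], cv=5, scoring="f1")
print("5-fold F1: {:.3f} +/- {:.3f}".format(scores.mean(), scores.std()))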
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/869/69869700.ipynb
null
null
[{"Id": 69869700, "ScriptId": 18798995, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3338793, "CreationDate": "08/03/2021 17:08:05", "VersionNumber": 3.0, "Title": "Tweets_classification", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 152.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 152.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,526
0
1,526
1,526
69433409
<jupyter_start><jupyter_text>COVID-19 Open Research Dataset Challenge (CORD-19) ### Dataset Description In response to the COVID-19 pandemic, the White House and a coalition of leading research groups have prepared the COVID-19 Open Research Dataset (CORD-19). CORD-19 is a resource of over 500,000 scholarly articles, including over 200,000 with full text, about COVID-19, SARS-CoV-2, and related coronaviruses. This freely available dataset is provided to the global research community to apply recent advances in natural language processing and other AI techniques to generate new insights in support of the ongoing fight against this infectious disease. There is a growing urgency for these approaches because of the rapid acceleration in new coronavirus literature, making it difficult for the medical research community to keep up. ### Call to Action We are issuing a call to action to the world's artificial intelligence experts to develop text and data mining tools that can help the medical community develop answers to high priority scientific questions. The CORD-19 dataset represents the most extensive machine-readable coronavirus literature collection available for data mining to date. This allows the worldwide AI research community the opportunity to apply text and data mining approaches to find answers to questions within, and connect insights across, this content in support of the ongoing COVID-19 response efforts worldwide. There is a growing urgency for these approaches because of the rapid increase in coronavirus literature, making it difficult for the medical community to keep up. A list of our initial key questions can be found under the **[Tasks](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge/tasks)** section of this dataset. These key scientific questions are drawn from the NASEM’s SCIED (National Academies of Sciences, Engineering, and Medicine’s Standing Committee on Emerging Infectious Diseases and 21st Century Health Threats) [research topics](https://www.nationalacademies.org/event/03-11-2020/standing-committee-on-emerging-infectious-diseases-and-21st-century-health-threats-virtual-meeting-1) and the World Health Organization’s [R&D Blueprint](https://www.who.int/blueprint/priority-diseases/key-action/Global_Research_Forum_FINAL_VERSION_for_web_14_feb_2020.pdf?ua=1) for COVID-19. Many of these questions are suitable for text mining, and we encourage researchers to develop text mining tools to provide insights on these questions. We are maintaining a summary of the [community's contributions](https://www.kaggle.com/covid-19-contributions). For guidance on how to make your contributions useful, we're maintaining a [forum thread](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge/discussion/138484) with the feedback we're getting from the medical and health policy communities. ### Prizes Kaggle is sponsoring a *$1,000 per task* award to the winner whose submission is identified as best meeting the evaluation criteria. The winner may elect to receive this award as a charitable donation to COVID-19 relief/research efforts or as a monetary payment. More details on the prizes and timeline can be found on the [discussion post](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge/discussion/135826). ### Accessing the Dataset We have made this dataset available on Kaggle. Watch out for [periodic updates](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge/discussion/137474). 
The dataset is also hosted on [AI2's Semantic Scholar](https://pages.semanticscholar.org/coronavirus-research). And you can search the dataset using AI2's new [COVID-19 explorer](https://cord-19.apps.allenai.org/).

The licenses for each dataset can be found in the all_sources_metadata csv file.
Kaggle dataset identifier: CORD-19-research-challenge
<jupyter_script>import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# import packages
from collections import defaultdict
import glob
import itertools
import json
import pickle
import os
import re

import bs4
import contractions
import inflect
from langdetect import detect
import matplotlib.pyplot as plt
import networkx as nx
import nltk

nltk.download("punkt")
nltk.download("stopwords")
nltk.download("wordnet")
from nltk import tokenize
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
import numpy as np
import pandas as pd
from pandarallel import pandarallel
from PIL import Image
import requests
import seaborn as sns
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")
from spacy.matcher import Matcher
from spacy.tokens import Span
from tqdm import tqdm
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator

# Initialize pandarallel
pandarallel.initialize(use_memory_fs=False, nb_workers=2)

# pandas options
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.set_option("display.width", 1000)
pd.set_option("display.expand_frame_repr", False)
pd.options.mode.chained_assignment = None
tqdm.pandas()

# make a temp dir to save intermediate data
if not os.path.exists("../data"):
    os.mkdir("../data")


# Helper functions and classes

# helper function to generate a file path
def filepath(*args):
    if len(args) < 1:
        return None
    elif len(args) == 1:
        return args[0]
    else:
        return f"{args[0]}/{filepath(*args[1:])}"


# Add a progress bar to a loop when the iterable is long enough
def addtimebar(L, threshold=1000):
    if len(L) > threshold:
        return tqdm(L)
    else:
        return L


# File Reader Class
class FileReader:
    def __init__(self, file_path):
        with open(file_path) as file:
            content = json.load(file)
            self.paper_id = content["paper_id"]
            self.abstract = []
            self.body_text = []
            # Abstract
            try:
                for entry in content["abstract"]:
                    self.abstract.append(entry["text"])
            except KeyError:
                pass
            # Body text
            try:
                for entry in content["body_text"]:
                    self.body_text.append(entry["text"])
            except KeyError:
                pass
            self.abstract = "\n".join(self.abstract)
            self.body_text = "\n".join(self.body_text)

    def __repr__(self):
        return f"{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}..."
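# As a quick illustration (not part of the original notebook), FileReader expects the CORD-19
# JSON schema with "paper_id", "abstract" and "body_text" entries; the tiny synthetic file
# below is made up and only shows the shape of the parsed output.
import json
import tempfile

_demo_paper = {
    "paper_id": "demo0001",
    "abstract": [{"text": "A short demo abstract."}],
    "body_text": [{"text": "First paragraph."}, {"text": "Second paragraph."}],
}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump(_demo_paper, tmp)

demo_reader = FileReader(tmp.name)
print(demo_reader)  # prints the paper_id followed by the truncated abstract and body text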
# Helper function that inserts a line break roughly every `length` characters.
# This is for the interactive plot so that the hover tool fits the screen.
def get_breaks(content, length):
    data = ""
    words = content.split(" ")
    total_chars = 0

    # add break every length characters
    for i in range(len(words)):
        total_chars += len(words[i])
        if total_chars > length:
            data = data + "<br>" + words[i]
            total_chars = 0
        else:
            data = data + " " + words[i]
    return data


## composition function
## example: compose(f1,f2,f3)(x, y) = f3(f2(f1(x, y)))
def compose(*funcs):
    *funcs, penultimate, last = funcs
    if funcs:
        penultimate = compose(*funcs, penultimate)
    # apply the functions left to right, as in the example above
    return lambda *args: last(penultimate(*args))


# file path
path = "/kaggle/input/CORD-19-research-challenge/"  # may need to change when submitting to Kaggle
meta = "metadata.csv"

# paths for all json files
all_jsons = glob.glob(filepath(path, "**", "*.json"), recursive=True)

# dataframe for the metadata
meta_df = pd.read_csv(
    filepath(path, meta),
    dtype={
        "pubmed_id": str,
        "Microsoft Academic Paper ID": str,
        "doi": str,
        "journal": str,
    },
    low_memory=False,
)
print(len(meta_df))  # number of rows in meta_df
meta_df.head(n=2)

# Have a look at the first paper's text data
first_row = FileReader(all_jsons[0])
print(first_row)
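# A small sanity check (illustration only, assuming the cells above have run): `compose`
# applies its arguments left to right, and `get_breaks` inserts "<br>" tags for plotting.
double = lambda x: x * 2
increment = lambda x: x + 1
print(compose(double, increment)(10))  # increment(double(10)) -> 21
print(compose(increment, double)(10))  # double(increment(10)) -> 22
print(get_breaks("one two three four five six", 10))  # a "<br>" appears once ~10 characters are exceeded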
".join(authors)) except Exception as e: # if only one author - or Null valie dict_["authors"].append(meta_data["authors"].values[0]) # add the title information, add breaks when needed try: title = get_breaks(meta_data["title"].values[0], 40) dict_["title"].append(title) # if title was not provided except Exception as e: dict_["title"].append(meta_data["title"].values[0]) # add publish time try: publish_time = get_breaks(meta_data["publish_time"].values[0], 40) dict_["publish_time"].append(publish_time) # if publish time was not provided except Exception as e: dict_["publish_time"].append(meta_data["publish_time"].values[0]) # add the journal information dict_["journal"].append(meta_data["journal"].values[0]) df_covid = pd.DataFrame( dict_, columns=[ "paper_id", "abstract", "body_text", "authors", "title", "journal", "publish_time", "abstract_summary", ], ) df_covid.head() # save data df_covid.to_pickle("../data/df_kaggle_all.pkl") # load saved data # with open('../data/df_kaggle_all.pkl', 'rb') as fp: # df_covid = pickle.load(fp) # function to check if text of certain column in dataframe is written in certain language def is_lang(row, item, lang, dropNA=True): if ( row[item] != None and row[item] != "" and row[item] != "None" and isinstance(row[item], str) ): try: return detect(row[item]) == lang except Exception as e: # print("Non-readable entity will be droped from data.frame") return False else: return not dropNA # select article written in certain language def select_article_lang_multi(df, basedon="abstract", lang="en"): return df[df.parallel_apply(lambda text: is_lang(text, basedon, lang), axis=1)] df_covid_eng = select_article_lang_multi(df_covid) print("Number of English Articles: {}/{}".format(len(df_covid_eng), len(df_covid))) df_covid_eng.head(n=2) # save intermidiate data df_covid_eng.to_pickle("../data/df_kaggle_all_eng.pkl") # load saved data # with open('../data/df_kaggle_all_eng.pkl', 'rb') as fp: # df_covid_eng = pickle.load(fp) # Pre-processing functions ## text level processors def replace_brackets_with_whitespace(text): text = text.replace("(", "") text = text.replace(")", "") text = text.replace("[", "") text = text.replace("]", "") return text def replace_contractions(text): return contractions.fix(text) # remove special characters def strip_characters(text): t = re.sub("\(|\)|:|,|;|\.|’||“|\?|%|>|<", "", text) t = re.sub("/", " ", t) t = t.replace("'", "") return t ## word level processors: def to_lowercase(word): return word.lower() def do_stemming(stemmer): return lambda word: stemmer.stem(word) def do_lemmatizing(lemmatizer): return lambda word: lemmatizer.lemmatize(word, pos="v") # help function to test if word is stopword def is_stopword(word): return word in stopwords.words("english") # function to process word def process_word_by(word_cleanner, uniqueYN): def cond(word): return ( len(word) > 1 and not is_stopword(word) and not word.isnumeric() and word.isalnum() and word != len(word) * word[0] ) def clean_byword(text): return list( take_unique(uniqueYN)((word_cleanner(word) for word in text if cond(word))) ) return clean_byword # function to decide making a set (unique words) from text or not def take_unique(YN): return set if YN else lambda x: x # function to pre_processing the text ## compose text and word processors by combine every individual processor together text_processor = compose( replace_brackets_with_whitespace, replace_contractions, strip_characters ) word_processor = compose( to_lowercase, do_lemmatizing(WordNetLemmatizer()), 
## pre_processing function: takes a dataframe plus text and word processor functions as input, then cleans and tokenizes the specified column
def pre_processing(df, text_tools, word_tools):
    def inner(col, uniqueYN=False):
        return (
            df[col]
            .parallel_apply(text_tools)
            .parallel_apply(nltk.word_tokenize)
            .parallel_apply(process_word_by(word_tools, uniqueYN=uniqueYN))
        )

    return inner


# sort by publish time
tokenized_df = df_covid_eng.sort_values(by="publish_time", ascending=False)
tokenized_df.head(n=3)

# create a processor function with the chosen text and word processors and apply it to all articles to clean and tokenize all abstracts
processor = pre_processing(tokenized_df, text_processor, word_processor)
tokenized_df["abstract_token"] = processor("abstract")

# reset index (this is necessary for the cosine similarity search)
tokenized_df = tokenized_df.reset_index(drop=True)

# Our processor function is a generic procedure to clean and tokenize any column with a user-specified column name, such as 'abstract' or 'body_text'
# Because processing body_text takes too long, we only process abstract
# tokenized_df['body_text_token'] = processor('body_text')

# store the dataframe to ../data/
tokenized_df.to_pickle("../data/df_kaggle_all_eng_tokenized.pkl")

# with open('../data/df_kaggle_all_eng_tokenized.pkl', 'rb') as fp:
#     tokenized_df = pickle.load(fp)

# have a look at the head of the cleaned and tokenized abstract column
tokenized_df.head()["abstract_token"]

tokenized_df.head()


def get_top_nK_words(corpus, K=1, n=None):
    vec1 = CountVectorizer(
        max_df=0.7,
        stop_words=stopwords.words("english"),
        ngram_range=(K, K),
        max_features=2000,
    ).fit(corpus)
    bag_of_words = vec1.transform(corpus)
    sum_words = bag_of_words.sum(axis=0)
    words_freq = [(word, sum_words[0, idx]) for word, idx in vec1.vocabulary_.items()]
    words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
    return words_freq[:n]


# corpus of space-joined abstract tokens used by the n-gram counts below
corpus = tokenized_df["abstract_token"].parallel_apply(lambda x: " ".join(x))

# Convert most freq words to dataframe for plotting bar plot
top_words = get_top_nK_words(corpus, K=1, n=20)
top_df = pd.DataFrame(top_words)
top_df.columns = ["Word", "Freq"]

# Barplot of most freq words
sns.set(rc={"figure.figsize": (13, 8)})
g = sns.barplot(x="Word", y="Freq", data=top_df)
g.set_xticklabels(g.get_xticklabels(), rotation=30)

# Top bi-grams
top2_words = get_top_nK_words(corpus, K=2, n=20)
top2_df = pd.DataFrame(top2_words)
top2_df.columns = ["Bi-gram", "Freq"]
print(top2_df)

# Barplot of most freq Bi-grams
import seaborn as sns

sns.set(rc={"figure.figsize": (13, 8)})
h = sns.barplot(x="Bi-gram", y="Freq", data=top2_df)
h.set_xticklabels(h.get_xticklabels(), rotation=45)
fig = h.get_figure()

top3_words = get_top_nK_words(corpus, K=3, n=20)
top3_df = pd.DataFrame(top3_words)
top3_df.columns = ["Tri-gram", "Freq"]
print(top3_df)

# Barplot of most freq Tri-grams
import seaborn as sns

sns.set(rc={"figure.figsize": (13, 8)})
j = sns.barplot(x="Tri-gram", y="Freq", data=top3_df)
j.set_xticklabels(j.get_xticklabels(), rotation=45)
fig = j.get_figure()


# compute TF-IDF scores for word vectors
def tfidf_(df):
    myvectorizer = TfidfVectorizer()
    vectors = myvectorizer.fit_transform(
        df["abstract_token"].parallel_apply(lambda x: " ".join(x))
    ).toarray()
    feature_names = myvectorizer.get_feature_names()
    veclist = vectors.tolist()
    out_tfidf = pd.DataFrame(veclist, columns=feature_names)
    return out_tfidf


tfidf_(tokenized_df[:20]).head()
# using sklearn is 10 times faster than a self-written script
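# A small, self-contained illustration (toy data, not from CORD-19) of what the TF-IDF scores
# above represent: a term that is frequent in one document but rare across the corpus gets a
# high weight, while a term shared by many documents is down-weighted.
_toy_docs = [
    "virus spread in the city",
    "virus vaccine trial results",
    "city traffic report",
]
_toy_vec = TfidfVectorizer()
_toy_tfidf = pd.DataFrame(
    _toy_vec.fit_transform(_toy_docs).toarray(), columns=_toy_vec.get_feature_names()
)
print(_toy_tfidf.round(2))
# in the second row, 'vaccine' outweighs 'virus' because 'virus' also appears in the first document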
# extract key-words with tfidf scores
tfidf_scores_df = tfidf_(tokenized_df[:20])
N = 15  # Number of min/max values
u = np.argpartition(tfidf_scores_df, axis=1, kth=N).values
v = tfidf_scores_df.columns.values[u].reshape(u.shape)
maxdf = pd.DataFrame(v[:, -N:]).rename(columns=lambda x: f"Max{x+1}")
maxdf.head()


# convert query tokens to a vector
def gen_vector_T(tokens):
    Q = np.zeros((len(vocabulary)))
    x = tfidf.transform(tokens)
    # print(tokens[0].split(','))
    for token in tokens[0].split(","):
        # print(token)
        try:
            ind = vocabulary.index(token)
            Q[ind] = x[0, tfidf.vocabulary_[token]]
        except (ValueError, KeyError):
            # token not in the vocabulary
            pass
    return Q


# calculate cosine similarity
def cosine_sim(a, b):
    cos_sim = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return cos_sim


# Function to get the transformed tfidf model
# (not called below; the fitted matrix is later assigned to the same name `tfidf_tran`)
def tfidf_tran(mydf):
    vectorizer = TfidfVectorizer()
    vectors = vectorizer.fit_transform(
        mydf["abstract_token"].parallel_apply(lambda x: " ".join(x))
    )
    return vectors


# Define wordLemmatizer
# WordNetLemmatizer requires POS tags to understand if the word is a noun, verb, adjective etc. By default it is set to Noun
def wordLemmatizer(data):
    tag_map = defaultdict(lambda: wn.NOUN)
    tag_map["J"] = wn.ADJ
    tag_map["V"] = wn.VERB
    tag_map["R"] = wn.ADV
    file_clean_k = pd.DataFrame()
    for index, entry in enumerate(data):
        # Declaring an empty list to store the words that follow the rules for this step
        Final_words = []
        # Initializing WordNetLemmatizer()
        word_Lemmatized = WordNetLemmatizer()
        # The pos_tag function below will provide the 'tag', i.e. whether the word is a Noun(N) or Verb(V) or something else.
        for word, tag in nltk.pos_tag(entry):
            # The condition below checks for stop words and considers only alphabetic tokens
            if (
                len(word) > 1
                and word not in stopwords.words("english")
                and word.isalpha()
            ):
                word_Final = word_Lemmatized.lemmatize(word, tag_map[tag[0]])
                Final_words.append(word_Final)
                # The final processed set of words for each iteration is stored in 'Keyword_final'
                file_clean_k.loc[index, "Keyword_final"] = str(Final_words).lower()
                file_clean_k = file_clean_k.replace(
                    to_replace="\[.", value="", regex=True
                )
                file_clean_k = file_clean_k.replace(
                    to_replace="'", value="", regex=True
                )
                file_clean_k = file_clean_k.replace(
                    to_replace=" ", value="", regex=True
                )
                file_clean_k = file_clean_k.replace(
                    to_replace="\]", value="", regex=True
                )
    return file_clean_k


def cosine_similarity_T(k, query, text_token_df):
    preprocessed_query = re.sub("\W+", " ", query).strip()
    tokens = nltk.word_tokenize(text_processor(str(preprocessed_query).lower()))
    tokens = [
        word_processor(token)
        for token in tokens
        if len(token) > 1
        and not is_stopword(token)
        and not token.isnumeric()
        and token.isalnum()
        and token != len(token) * token[0]
    ]
    q_df = pd.DataFrame(columns=["q_clean"])
    q_df.loc[0, "q_clean"] = tokens
    q_df["q_clean"] = wordLemmatizer(q_df.q_clean)
    d_cosines = []
    # print(q_df['q_clean'])
    query_vector = gen_vector_T(q_df["q_clean"])
    # print(query_vector)
    # print(q_df['q_clean'])
    # print(sum(query_vector))
    for d in tfidf_tran.A:
        d_cosines.append(cosine_sim(query_vector, d))
    # print(d_cosines)
    out = np.array(d_cosines).argsort()[-k:][::-1]
    d_cosines.sort()
    # print(out)
    a = pd.DataFrame()
    firsttime = True
    for i, index in enumerate(out):
        try:
            a.loc[i, "Paper ID"] = text_token_df["paper_id"][index]
            a.loc[i, "Title"] = text_token_df["title"][index]
            a.loc[i, "Summary"] = text_token_df["abstract_summary"][index]
        except KeyError as e:
            if firsttime:
                print("Fewer matches are found than requested {}".format(k))
                firsttime = not firsttime
            pass
    for j, simScore in enumerate(d_cosines[-k:][::-1]):
        a.loc[j, "Score"] = simScore
    return a
## Create Vocabulary
vocabulary = set()
for tokens in tokenized_df.abstract_token:
    vocabulary.update(tokens)
vocabulary = list(vocabulary)

# Initializing the TfIdf model
tfidf = TfidfVectorizer(vocabulary=vocabulary)

# Fit and transform the TfIdf model
tfidf_tran = tfidf.fit_transform(
    tokenized_df["abstract_token"].parallel_apply(lambda x: " ".join(x))
)

# search engine using cosine similarity + TF-IDF
TFIDF_output = cosine_similarity_T(
    20000,
    "SARS-CoV-2 Covid-19 HCoV-19 Covid corona 2019-nCoV sars cov2 ncov wuhan coronavirus pneumonia",
    tokenized_df,
)
TFIDF_output_significant = TFIDF_output[TFIDF_output["Score"] > 0]
TFIDF_output_significant.head()

# store the dataframe to ../data/
TFIDF_output_significant.to_pickle("../data/TFIDF_output_significant_all.pkl")

# The number of the most significant search results
len(TFIDF_output_significant)

get_top = 500
top_to_print = 10

# with open('../data/TFIDF_output_significant_all.pkl', 'rb') as fp:
#     TFIDF_output_significant = pickle.load(fp)
with open("../data/df_kaggle_all_eng.pkl", "rb") as fp:
    df_covid_eng = pickle.load(fp)

df_covid_eng.drop_duplicates(subset=["paper_id"], inplace=True)
TFIDF_output_significant.drop_duplicates(subset=["Paper ID"], inplace=True)

papers_to_embed = df_covid_eng.loc[
    df_covid_eng["paper_id"].isin(TFIDF_output_significant["Paper ID"])
].copy()
sort_papers = (
    TFIDF_output_significant.loc[
        TFIDF_output_significant["Paper ID"].isin(papers_to_embed["paper_id"])
    ]
    .sort_values(by="Score", ascending=False)["Paper ID"]
    .to_list()
)
papers_to_embed = papers_to_embed.set_index("paper_id").loc[sort_papers].reset_index()

tqdm.pandas(desc="Combining abstracts and body text")
papers_to_embed["combined_text"] = papers_to_embed.progress_apply(
    lambda x: x["abstract"] + " " + x["body_text"], axis=1
)
tqdm.pandas(desc="Splitting abstracts into sentences")
papers_to_embed["abstract_sentence"] = papers_to_embed["abstract"].progress_apply(
    tokenize.sent_tokenize
)
tqdm.pandas(desc="Splitting papers into sentences")
papers_to_embed["combined_text_sentence"] = papers_to_embed[
    "combined_text"
].progress_apply(tokenize.sent_tokenize)

embedder = SentenceTransformer("bert-base-nli-mean-tokens")

sent_to_embed_abstr = list(itertools.chain(*papers_to_embed["abstract_sentence"]))
sent_to_embed_comb = list(
    itertools.chain(*papers_to_embed["combined_text_sentence"].iloc[:get_top])
)
abstract_embed = np.array(
    embedder.encode(sent_to_embed_abstr, batch_size=64, show_progress_bar=True)
)
comb_text_embed = np.array(
    embedder.encode(sent_to_embed_comb, batch_size=64, show_progress_bar=True)
)

# save intermediate data in case it is needed
np.save("../data/abstr_data_encodings", abstract_embed)
np.save("../data/comb_text_data_encodings", comb_text_embed)
questions = [
    (
        "Evidence of animal infection with SARS-CoV-2 and its transmission to "
        "other hosts, including the spill-over to humans."
    )
]
questions_embed = np.array(
    embedder.encode(questions, batch_size=64, show_progress_bar=True)
)

similarity_abstr = cosine_similarity(abstract_embed, questions_embed).squeeze()
similarity_comb = cosine_similarity(comb_text_embed, questions_embed).squeeze()

sort_args_abstr = np.argsort(similarity_abstr)[::-1]
sim_sort_abstr = similarity_abstr[sort_args_abstr]
sort_args_comb = np.argsort(similarity_comb)[::-1]
sim_sort_comb = similarity_comb[sort_args_comb]

paper_id_abst = np.array(
    list(
        itertools.chain(
            *papers_to_embed.progress_apply(
                lambda x: [x["paper_id"]] * len(x["abstract_sentence"]), axis=1
            ).tolist()
        )
    )
)
paper_id_comb = np.array(
    list(
        itertools.chain(
            *papers_to_embed.iloc[:get_top]
            .progress_apply(
                lambda x: [x["paper_id"]] * len(x["combined_text_sentence"]), axis=1
            )
            .tolist()
        )
    )
)

interest_paper_id_abstr = paper_id_abst[sort_args_abstr]
interest_sentences_abstr = np.array(sent_to_embed_abstr)[sort_args_abstr]
interest_abstracts = (
    papers_to_embed.set_index("paper_id")
    .loc[interest_paper_id_abstr]["abstract"]
    .tolist()
)
interest_paper_id_comb = paper_id_comb[sort_args_comb]
interest_sentences_comb = np.array(sent_to_embed_comb)[sort_args_comb]
interest_comb_text = (
    papers_to_embed.set_index("paper_id")
    .loc[interest_paper_id_comb]["combined_text"]
    .tolist()
)

with open("interesting_papers_based_on_abstract.txt", "w") as f:
    for paper, sent, abst, metric in zip(
        interest_paper_id_abstr,
        interest_sentences_abstr,
        interest_abstracts,
        sim_sort_abstr,
    ):
        _ = f.write("Paper ID: " + paper + "\n")
        _ = f.write("Important sentence: " + sent + "\n")
        # _ = f.write('Associated abstract: ' + abst + '\n')
        _ = f.write("Cosine Similarity metric: " + "{0:.3f}".format(metric) + "\n")
        _ = f.write("\n")

with open("interesting_papers_based_on_comb_text.txt", "w") as f:
    for paper, sent, comb_text, metric in zip(
        interest_paper_id_comb,
        interest_sentences_comb,
        interest_comb_text,
        sim_sort_comb,
    ):
        _ = f.write("Paper ID: " + paper + "\n")
        _ = f.write("Important sentence: " + sent + "\n")
        _ = f.write("Cosine Similarity metric: " + "{0:.3f}".format(metric) + "\n")
        # _ = f.write('Associated body text: ' + comb_text + '\n')
        _ = f.write("\n")

print("Results based on abstract:")
print('"""')
with open("interesting_papers_based_on_abstract.txt", "r") as f:
    print("\n".join(f.read().splitlines()[: 4 * top_to_print]))
print('"""')
print("")
print("Results based on abstract and body text:")
print('"""')
with open("interesting_papers_based_on_comb_text.txt", "r") as f:
    print("\n".join(f.read().splitlines()[: 4 * top_to_print]))
print('"""')

rows_to_sample = np.random.randint(len(comb_text_embed), size=1000)
sentences_subset = np.array(sent_to_embed_comb)[rows_to_sample].tolist()
embeddings_subset = comb_text_embed[rows_to_sample]

# Perform k-means clustering
num_clusters = 5
clustering_model = KMeans(n_clusters=num_clusters)
_ = clustering_model.fit(embeddings_subset)
cluster_assignment = clustering_model.labels_

clustered_sentences = [[] for i in range(num_clusters)]
for sentence_id, cluster_id in enumerate(cluster_assignment):
    clustered_sentences[cluster_id].append(sentences_subset[sentence_id])

for i, cluster in enumerate(clustered_sentences):
    print("Cluster ", i + 1)
    print(cluster[:10])
    print("")
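# Illustration only (assuming the clustering cell above has run): one way to make the clusters
# easier to interpret is to label each one with its most frequent terms.
for i, cluster in enumerate(clustered_sentences):
    if not cluster:
        continue
    _cv = CountVectorizer(stop_words="english", max_features=5)
    _cv.fit(cluster)
    print("Cluster", i + 1, "top terms:", list(_cv.get_feature_names()))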
file_to_read = "./interesting_papers_based_on_comb_text.txt"
content = None
with open(file_to_read) as f:
    content = f.readlines()
content = [x.strip() for x in content]
content = [string for string in content if string != ""]

top_results = content[0:100]  # Select the first n elements.

selected_top_sentences = []
for elem in top_results:
    if elem.startswith("Important sentence:"):
        selected_top_sentences.append(elem.replace("Important sentence:", "").strip())

# Select the first n sentences.
selected_top_sentences = selected_top_sentences[0:20]

# Some settings for the plot.
pd.set_option("display.max_colwidth", 200)


# The main idea is to go through a sentence and extract the subject and the object
# as and when they are encountered.
def get_entities(sent):
    ## chunk 1
    ent1 = ""
    ent2 = ""

    prv_tok_dep = ""  # dependency tag of previous token in the sentence
    prv_tok_text = ""  # previous token in the sentence

    prefix = ""
    modifier = ""

    for tok in nlp(sent):
        ## chunk 2
        # if token is a punctuation mark then move on to the next token
        if tok.dep_ != "punct":
            # check: token is a compound word or not
            if tok.dep_ == "compound":
                prefix = tok.text
                # if the previous word was also a 'compound' then add the current word to it
                if prv_tok_dep == "compound":
                    prefix = prv_tok_text + " " + tok.text

            # check: token is a modifier or not
            if tok.dep_.endswith("mod") == True:
                modifier = tok.text
                # if the previous word was also a 'compound' then add the current word to it
                if prv_tok_dep == "compound":
                    modifier = prv_tok_text + " " + tok.text

            ## chunk 3
            if tok.dep_.find("subj") == True:
                ent1 = modifier + " " + prefix + " " + tok.text
                prefix = ""
                modifier = ""
                prv_tok_dep = ""
                prv_tok_text = ""

            ## chunk 4
            if tok.dep_.find("obj") == True:
                ent2 = modifier + " " + prefix + " " + tok.text

            ## chunk 5
            # update variables
            prv_tok_dep = tok.dep_
            prv_tok_text = tok.text

    return [ent1.strip(), ent2.strip()]


# Relation/Predicate Extraction.
# The hypothesis is that the predicate is actually the main verb in a sentence.
def get_relation(sent):
    doc = nlp(sent)

    # Matcher class object
    matcher = Matcher(nlp.vocab)

    # define the pattern
    pattern = [
        {"DEP": "ROOT"},
        {"DEP": "prep", "OP": "?"},
        {"DEP": "agent", "OP": "?"},
        {"POS": "ADJ", "OP": "?"},
    ]
    matcher.add("matching_1", None, pattern)
    matches = matcher(doc)
    k = len(matches) - 1
    span = doc[matches[k][1] : matches[k][2]]
    return span.text


entity_pairs = []
for i in tqdm(selected_top_sentences):
    entity_pairs.append(get_entities(i))

# extract relationship
relations = [get_relation(i) for i in tqdm(selected_top_sentences)]
print(relations)

# extract subject
source = [i[0] for i in entity_pairs]
print(source)

# extract object
target = [i[1] for i in entity_pairs]
print(target)

kg_df = pd.DataFrame({"source": source, "target": target, "edge": relations})

# Use the library networkx to build graph.
# Create a directed graph from the dataframe first.
sentence_graph = nx.from_pandas_edgelist(
    kg_df, "source", "target", edge_attr=True, create_using=nx.MultiDiGraph()
)

plt.figure(
    figsize=(12, 9)
)  # Change this to make plotting area for knowledge graph bigger or smaller.
# For parameters of spring_layout, see
# https://networkx.github.io/documentation/stable/reference/generated/networkx.drawing.layout.spring_layout.html
# pos = nx.spring_layout(sentence_graph)
pos = nx.spring_layout(
    sentence_graph, k=1.3, iterations=100, fixed=None, center=(0, 0), scale=4
)
nx.draw(
    sentence_graph,
    with_labels=True,
    node_color="skyblue",
    edge_cmap=plt.cm.Blues,
    pos=pos,
)
plt.show()
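# A short follow-up sketch (not in the original notebook): once kg_df exists, the graph can be
# filtered to a single predicate to get a less cluttered view; the relation string below is a
# hypothetical example and should be replaced by one of the values printed in `relations`.
_relation_of_interest = "causes"  # hypothetical example value
_sub_df = kg_df[kg_df["edge"] == _relation_of_interest]
if len(_sub_df) > 0:
    _sub_graph = nx.from_pandas_edgelist(
        _sub_df, "source", "target", edge_attr=True, create_using=nx.MultiDiGraph()
    )
    plt.figure(figsize=(12, 9))
    nx.draw(
        _sub_graph,
        with_labels=True,
        node_color="skyblue",
        pos=nx.spring_layout(_sub_graph),
    )
    plt.show()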
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/433/69433409.ipynb
CORD-19-research-challenge
null
[{"Id": 69433409, "ScriptId": 18945160, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2586372, "CreationDate": "07/31/2021 01:03:36", "VersionNumber": 3.0, "Title": "notebook8d4c7f3aeb", "EvaluationDate": "07/31/2021", "IsChange": false, "TotalLines": 810.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 810.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92594996, "KernelVersionId": 69433409, "SourceDatasetVersionId": 2422207}]
[{"Id": 2422207, "DatasetId": 551982, "DatasourceVersionId": 2464392, "CreatorUserId": 1314380, "LicenseName": "Other (specified in description)", "CreationDate": "07/13/2021 17:00:35", "VersionNumber": 94.0, "Title": "COVID-19 Open Research Dataset Challenge (CORD-19)", "Slug": "CORD-19-research-challenge", "Subtitle": "An AI challenge with AI2, CZI, MSR, Georgetown, NIH & The White House", "Description": "### Dataset Description\n\nIn response to the COVID-19 pandemic, the White House and a coalition of leading research groups have prepared the COVID-19 Open Research Dataset (CORD-19). CORD-19 is a resource of over 500,000 scholarly articles, including over 200,000 with full text, about COVID-19, SARS-CoV-2, and related coronaviruses. This freely available dataset is provided to the global research community to apply recent advances in natural language processing and other AI techniques to generate new insights in support of the ongoing fight against this infectious disease. There is a growing urgency for these approaches because of the rapid acceleration in new coronavirus literature, making it difficult for the medical research community to keep up.\n\n### Call to Action\n\nWe are issuing a call to action to the world's artificial intelligence experts to develop text and data mining tools that can help the medical community develop answers to high priority scientific questions. The CORD-19 dataset represents the most extensive machine-readable coronavirus literature collection available for data mining to date. This allows the worldwide AI research community the opportunity to apply text and data mining approaches to find answers to questions within, and connect insights across, this content in support of the ongoing COVID-19 response efforts worldwide. There is a growing urgency for these approaches because of the rapid increase in coronavirus literature, making it difficult for the medical community to keep up.\n\nA list of our initial key questions can be found under the **[Tasks](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge/tasks)** section of this dataset. These key scientific questions are drawn from the NASEM\u2019s SCIED (National Academies of Sciences, Engineering, and Medicine\u2019s Standing Committee on Emerging Infectious Diseases and 21st Century Health Threats) [research topics](https://www.nationalacademies.org/event/03-11-2020/standing-committee-on-emerging-infectious-diseases-and-21st-century-health-threats-virtual-meeting-1) and the World Health Organization\u2019s [R&D Blueprint](https://www.who.int/blueprint/priority-diseases/key-action/Global_Research_Forum_FINAL_VERSION_for_web_14_feb_2020.pdf?ua=1) for COVID-19. \n\nMany of these questions are suitable for text mining, and we encourage researchers to develop text mining tools to provide insights on these questions. \n\nWe are maintaining a summary of the [community's contributions](https://www.kaggle.com/covid-19-contributions). For guidance on how to make your contributions useful, we're maintaining a [forum thread](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge/discussion/138484) with the feedback we're getting from the medical and health policy communities. \n\n### Prizes\n\nKaggle is sponsoring a *$1,000 per task* award to the winner whose submission is identified as best meeting the evaluation criteria. The winner may elect to receive this award as a charitable donation to COVID-19 relief/research efforts or as a monetary payment. 
More details on the prizes and timeline can be found on the [discussion post](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge/discussion/135826).\n\n### Accessing the Dataset\n\nWe have made this dataset available on Kaggle. Watch out for [periodic updates](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge/discussion/137474). \n\nThe dataset is also hosted on [AI2's Semantic Scholar](https://pages.semanticscholar.org/coronavirus-research). And you can search the dataset using AI2's new [COVID-19 explorer](https://cord-19.apps.allenai.org/).\n\nThe licenses for each dataset can be found in the all _ sources _ metadata csv file.\n\n### Acknowledgements\n\n![](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F1314380%2Fae91071ed467eb59eaaaa42f0b7c040d%2Fcovid-19_partners_logos.png?generation=1591119342897058&alt=media)\n\n\nThis dataset was created by the Allen Institute for AI in partnership with the Chan Zuckerberg Initiative, Georgetown University\u2019s Center for Security and Emerging Technology, Microsoft Research, IBM, and the National Library of Medicine - National Institutes of Health, in coordination with The White House Office of Science and Technology Policy.", "VersionNotes": "2021-07-12", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 551982, "CreatorUserId": 2931338, "OwnerUserId": NaN, "OwnerOrganizationId": 3737.0, "CurrentDatasetVersionId": 3756201.0, "CurrentDatasourceVersionId": 3810704.0, "ForumId": 565591, "Type": 2, "CreationDate": "03/12/2020 20:05:08", "LastActivityDate": "03/12/2020", "TotalViews": 4468011, "TotalDownloads": 163921, "TotalVotes": 10518, "TotalKernels": 1717}]
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # install packages from collections import defaultdict import glob import itertools import json import pickle import os import re import bs4 import contractions import inflect from langdetect import detect import matplotlib.pyplot as plt import networkx as nx import nltk nltk.download("punkt") nltk.download("stopwords") nltk.download("wordnet") from nltk import tokenize from nltk.corpus import wordnet as wn from nltk.corpus import stopwords from nltk.stem import LancasterStemmer from nltk.stem import PorterStemmer from nltk.stem import WordNetLemmatizer import numpy as np import pandas as pd from pandarallel import pandarallel from PIL import Image import requests import seaborn as sns from sentence_transformers import SentenceTransformer from sklearn.cluster import KMeans from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity import spacy from spacy import displacy nlp = spacy.load("en_core_web_sm") from spacy.matcher import Matcher from spacy.tokens import Span from tqdm import tqdm from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator # Initialize pandarallel pandarallel.initialize(use_memory_fs=False, nb_workers=2) # pandas options pd.set_option("display.max_rows", 500) pd.set_option("display.max_columns", 500) pd.set_option("display.width", 1000) pd.set_option("display.expand_frame_repr", False) pd.options.mode.chained_assignment = None tqdm.pandas() # make temp dir to save intermidiate data if not os.path.exists("../data"): os.mkdir("../data") # Help functions and class # help function to generate file path def filepath(*args): if len(args) < 1: return None elif len(args) == 1: return args[0] else: return f"{args[0]}/{filepath(*args[1:])}" # Add time bar to loop def addtimebar(L, threshold=1000): if len(L) > threshold: return tqdm(L) else: return L # File Reader Class class FileReader: def __init__(self, file_path): with open(file_path) as file: content = json.load(file) self.paper_id = content["paper_id"] self.abstract = [] self.body_text = [] # Abstract try: for entry in content["abstract"]: self.abstract.append(entry["text"]) except KeyError: pass # Body text try: for entry in content["body_text"]: self.body_text.append(entry["text"]) except KeyError: pass self.abstract = "\n".join(self.abstract) self.body_text = "\n".join(self.body_text) def __repr__(self): return f"{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}..." # Helper function adds break after every words when character length reach to certain amount. This is for the interactive plot so that hover tool fits the screen. 
def get_breaks(content, length): data = "" words = content.split(" ") total_chars = 0 # add break every length characters for i in range(len(words)): total_chars += len(words[i]) if total_chars > length: data = data + "<br>" + words[i] total_chars = 0 else: data = data + " " + words[i] return data ## composition function ## example: compose(f1,f2,f3)(x, y) = f3(f2(f1(x, y))) def compose(*funcs): *funcs, penultimate, last = funcs if funcs: penultimate = compose(*funcs, penultimate) return lambda *args: penultimate(last(*args)) # file path path = "/kaggle/input/CORD-19-research-challenge/" # may need to change when submit to kaggle meta = "metadata.csv" # path for all json files all_jsons = glob.glob(filepath(path, "**", "*.json"), recursive=True) # data.frame for meta data meta_df = pd.read_csv( filepath(path, meta), dtype={ "pubmed_id": str, "Microsoft Academic Paper ID": str, "doi": str, "journal": str, }, low_memory=False, ) print(len(meta_df)) # number of lines in meta_df_all meta_df.head(n=2) # Have a look the first line of text data first_row = FileReader(all_jsons[0]) print(first_row) # Load the text data into DataFrame dict_ = { "paper_id": [], "abstract": [], "body_text": [], "authors": [], "title": [], "publish_time": [], "journal": [], "abstract_summary": [], } for entry in addtimebar(all_jsons): content = FileReader(entry) # get metadata information meta_data = meta_df.loc[meta_df["sha"] == content.paper_id] # no metadata, skip this paper if len(meta_data) == 0: continue dict_["paper_id"].append(content.paper_id) dict_["abstract"].append(content.abstract) dict_["body_text"].append(content.body_text) # also create a column for the summary of abstract to be used in a plot if len(content.abstract) == 0: # no abstract provided dict_["abstract_summary"].append("Not provided.") elif len(content.abstract.split(" ")) > 100: # abstract provided is too long for plot, take first 300 words append with ... info = content.abstract.split(" ")[:100] summary = get_breaks(" ".join(info), 40) dict_["abstract_summary"].append(summary + "...") else: # abstract is short enough summary = get_breaks(content.abstract, 40) dict_["abstract_summary"].append(summary) # get metadata information meta_data = meta_df.loc[meta_df["sha"] == content.paper_id] try: # if more than one author authors = meta_data["authors"].values[0].split(";") if len(authors) > 2: # more than 2 authors, may be problem when plotting, so take first 2 append with ... dict_["authors"].append(". ".join(authors[:2]) + "...") else: # authors will fit in plot dict_["authors"].append(". 
".join(authors)) except Exception as e: # if only one author - or Null valie dict_["authors"].append(meta_data["authors"].values[0]) # add the title information, add breaks when needed try: title = get_breaks(meta_data["title"].values[0], 40) dict_["title"].append(title) # if title was not provided except Exception as e: dict_["title"].append(meta_data["title"].values[0]) # add publish time try: publish_time = get_breaks(meta_data["publish_time"].values[0], 40) dict_["publish_time"].append(publish_time) # if publish time was not provided except Exception as e: dict_["publish_time"].append(meta_data["publish_time"].values[0]) # add the journal information dict_["journal"].append(meta_data["journal"].values[0]) df_covid = pd.DataFrame( dict_, columns=[ "paper_id", "abstract", "body_text", "authors", "title", "journal", "publish_time", "abstract_summary", ], ) df_covid.head() # save data df_covid.to_pickle("../data/df_kaggle_all.pkl") # load saved data # with open('../data/df_kaggle_all.pkl', 'rb') as fp: # df_covid = pickle.load(fp) # function to check if text of certain column in dataframe is written in certain language def is_lang(row, item, lang, dropNA=True): if ( row[item] != None and row[item] != "" and row[item] != "None" and isinstance(row[item], str) ): try: return detect(row[item]) == lang except Exception as e: # print("Non-readable entity will be droped from data.frame") return False else: return not dropNA # select article written in certain language def select_article_lang_multi(df, basedon="abstract", lang="en"): return df[df.parallel_apply(lambda text: is_lang(text, basedon, lang), axis=1)] df_covid_eng = select_article_lang_multi(df_covid) print("Number of English Articles: {}/{}".format(len(df_covid_eng), len(df_covid))) df_covid_eng.head(n=2) # save intermidiate data df_covid_eng.to_pickle("../data/df_kaggle_all_eng.pkl") # load saved data # with open('../data/df_kaggle_all_eng.pkl', 'rb') as fp: # df_covid_eng = pickle.load(fp) # Pre-processing functions ## text level processors def replace_brackets_with_whitespace(text): text = text.replace("(", "") text = text.replace(")", "") text = text.replace("[", "") text = text.replace("]", "") return text def replace_contractions(text): return contractions.fix(text) # remove special characters def strip_characters(text): t = re.sub("\(|\)|:|,|;|\.|’||“|\?|%|>|<", "", text) t = re.sub("/", " ", t) t = t.replace("'", "") return t ## word level processors: def to_lowercase(word): return word.lower() def do_stemming(stemmer): return lambda word: stemmer.stem(word) def do_lemmatizing(lemmatizer): return lambda word: lemmatizer.lemmatize(word, pos="v") # help function to test if word is stopword def is_stopword(word): return word in stopwords.words("english") # function to process word def process_word_by(word_cleanner, uniqueYN): def cond(word): return ( len(word) > 1 and not is_stopword(word) and not word.isnumeric() and word.isalnum() and word != len(word) * word[0] ) def clean_byword(text): return list( take_unique(uniqueYN)((word_cleanner(word) for word in text if cond(word))) ) return clean_byword # function to decide making a set (unique words) from text or not def take_unique(YN): return set if YN else lambda x: x # function to pre_processing the text ## compose text and word processors by combine every individual processor together text_processor = compose( replace_brackets_with_whitespace, replace_contractions, strip_characters ) word_processor = compose( to_lowercase, do_lemmatizing(WordNetLemmatizer()), 
do_stemming(PorterStemmer()) ) # it is crucial to do stemming after lemmatization ## pre_processing function taking a dataframe and text and word processor functions as input and clean the text and tokenize the specified column def pre_processing(df, text_tools, word_tools): def inner(col, uniqueYN=False): return ( df[col] .parallel_apply(text_tools) .parallel_apply(nltk.word_tokenize) .parallel_apply(process_word_by(word_tools, uniqueYN=uniqueYN)) ) return inner # sort by publish time tokenized_df = df_covid_eng.sort_values(by="publish_time", ascending=False) tokenized_df.head(n=3) # created processor function with chosen text and work processors and apply it to all articles to clean and tokenize all abstracts processor = pre_processing(tokenized_df, text_processor, word_processor) tokenized_df["abstract_token"] = processor("abstract") # reset index (this is necessary for cosine similarity search) tokenized_df = tokenized_df.reset_index(drop=True) # Our processor function is a generic procedure to clean and tokenize any column with user specified column name, such as 'abstract' or 'body_text' # Because processing body_text takes too long, we only process abstract # tokenized_df['body_text_token'] = processor('body_text') # store the dataframe to ../data/ tokenized_df.to_pickle("../data/df_kaggle_all_eng_tokenized.pkl") # with open('../data/df_kaggle_all_eng_tokenized.pkl', 'rb') as fp: # tokenized_df = pickle.load(fp) # have a look at the head of the cleanned and tokenized abstract column tokenized_df.head()["abstract_token"] tokenized_df.head() def get_top_nK_words(corpus, K=1, n=None): vec1 = CountVectorizer( max_df=0.7, stop_words=stopwords.words("english"), ngram_range=(K, K), max_features=2000, ).fit(corpus) bag_of_words = vec1.transform(corpus) sum_words = bag_of_words.sum(axis=0) words_freq = [(word, sum_words[0, idx]) for word, idx in vec1.vocabulary_.items()] words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True) return words_freq[:n] # Convert most freq words to dataframe for plotting bar plot top_words = get_top_nK_words(corpus, K=1, n=20) top_df = pd.DataFrame(top_words) top_df.columns = ["Word", "Freq"] # Barplot of most freq words sns.set(rc={"figure.figsize": (13, 8)}) g = sns.barplot(x="Word", y="Freq", data=top_df) g.set_xticklabels(g.get_xticklabels(), rotation=30) # Top bi-grams top2_words = get_top_nK_words(corpus, K=2, n=20) top2_df = pd.DataFrame(top2_words) top2_df.columns = ["Bi-gram", "Freq"] print(top2_df) # Barplot of most freq Bi-grams import seaborn as sns sns.set(rc={"figure.figsize": (13, 8)}) h = sns.barplot(x="Bi-gram", y="Freq", data=top2_df) h.set_xticklabels(h.get_xticklabels(), rotation=45) fig = h.get_figure() top3_words = get_top_nK_words(corpus, K=3, n=20) top3_df = pd.DataFrame(top3_words) top3_df.columns = ["Tri-gram", "Freq"] print(top3_df) # Barplot of most freq Tri-grams import seaborn as sns sns.set(rc={"figure.figsize": (13, 8)}) j = sns.barplot(x="Tri-gram", y="Freq", data=top3_df) j.set_xticklabels(j.get_xticklabels(), rotation=45) fig = j.get_figure() # compute TF-IDF scores for word vectors def tfidf_(df): myvectorizer = TfidfVectorizer() vectors = myvectorizer.fit_transform( df["abstract_token"].parallel_apply(lambda x: " ".join(x)) ).toarray() feature_names = myvectorizer.get_feature_names() veclist = vectors.tolist() out_tfidf = pd.DataFrame(veclist, columns=feature_names) return out_tfidf tfidf_(tokenized_df[:20]).head() # using sklearn is 10 times faster than self-written script # extract key-words with tfidf score 
tfidf_scores_df = tfidf_(tokenized_df[:20]) N = 15 # Number of min/max values u = np.argpartition(tfidf_scores_df, axis=1, kth=N).values v = tfidf_scores_df.columns.values[u].reshape(u.shape) maxdf = pd.DataFrame(v[:, -N:]).rename(columns=lambda x: f"Max{x+1}") maxdf.head() # convert query token to vector def gen_vector_T(tokens): Q = np.zeros((len(vocabulary))) x = tfidf.transform(tokens) # print(tokens[0].split(',')) for token in tokens[0].split(","): # print(token) try: ind = vocabulary.index(token) Q[ind] = x[0, tfidf.vocabulary_[token]] except: pass return Q # calculate cosine similarity def cosine_sim(a, b): cos_sim = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)) return cos_sim # Function to get transformed tfidf model def tfidf_tran(mydf): vectorizer = TfidfVectorizer() vectors = vectorizer.fit_transform( mydf["abstract_token"].parallel_apply(lambda x: " ".join(x)) ) return vectors # Define wordLemmatizer # WordNetLemmatizer requires Pos tags to understand if the word is noun or verb or adjective etc. By default it is set to Noun def wordLemmatizer(data): tag_map = defaultdict(lambda: wn.NOUN) tag_map["J"] = wn.ADJ tag_map["V"] = wn.VERB tag_map["R"] = wn.ADV file_clean_k = pd.DataFrame() for index, entry in enumerate(data): # Declaring Empty List to store the words that follow the rules for this step Final_words = [] # Initializing WordNetLemmatizer() word_Lemmatized = WordNetLemmatizer() # pos_tag function below will provide the 'tag' i.e if the word is Noun(N) or Verb(V) or something else. for word, tag in nltk.pos_tag(entry): # Below condition is to check for Stop words and consider only alphabets if ( len(word) > 1 and word not in stopwords.words("english") and word.isalpha() ): word_Final = word_Lemmatized.lemmatize(word, tag_map[tag[0]]) Final_words.append(word_Final) # The final processed set of words for each iteration will be stored in 'text_final' file_clean_k.loc[index, "Keyword_final"] = str(Final_words).lower() file_clean_k = file_clean_k.replace( to_replace="\[.", value="", regex=True ) file_clean_k = file_clean_k.replace( to_replace="'", value="", regex=True ) file_clean_k = file_clean_k.replace( to_replace=" ", value="", regex=True ) file_clean_k = file_clean_k.replace( to_replace="\]", value="", regex=True ) return file_clean_k def cosine_similarity_T(k, query, text_token_df): preprocessed_query = re.sub("\W+", " ", query).strip() tokens = nltk.word_tokenize(text_processor(str(preprocessed_query).lower())) tokens = [ word_processor(token) for token in tokens if len(token) > 1 and not is_stopword(token) and not token.isnumeric() and token.isalnum() and token != len(token) * token[0] ] q_df = pd.DataFrame(columns=["q_clean"]) q_df.loc[0, "q_clean"] = tokens q_df["q_clean"] = wordLemmatizer(q_df.q_clean) d_cosines = [] # print(q_df['q_clean']) query_vector = gen_vector_T(q_df["q_clean"]) # print(query_vector) # print(q_df['q_clean']) # print(sum(query_vector)) for d in tfidf_tran.A: d_cosines.append(cosine_sim(query_vector, d)) # print(d_cosines) out = np.array(d_cosines).argsort()[-k:][::-1] d_cosines.sort() # print(out) a = pd.DataFrame() firsttime = True for i, index in enumerate(out): try: a.loc[i, "Paper ID"] = text_token_df["paper_id"][index] a.loc[i, "Title"] = text_token_df["title"][index] a.loc[i, "Summary"] = text_token_df["abstract_summary"][index] except KeyError as e: if firsttime: print("Fewer matches are found than requested {}".format(k)) firsttime = not firsttime pass for j, simScore in enumerate(d_cosines[-k:][::-1]): a.loc[j, "Score"] = 
simScore return a ## Create Vocabulary vocabulary = set() for tokens in tokenized_df.abstract_token: vocabulary.update(tokens) vocabulary = list(vocabulary) # Intializating the tfIdf model tfidf = TfidfVectorizer(vocabulary=vocabulary) # Transform the TfIdf model tfidf_tran = tfidf.fit_transform( tokenized_df["abstract_token"].parallel_apply(lambda x: " ".join(x)) ) # search engine using cosine similarity + TF-IDF TFIDF_output = cosine_similarity_T( 20000, "SARS-CoV-2 Covid-19 HCoV-19 Covid corona 2019-nCoV sars cov2 ncov wuhan coronavirus pneumonia", tokenized_df, ) TFIDF_output_significant = TFIDF_output[TFIDF_output["Score"] > 0] TFIDF_output_significant.head() # store the dataframe to ../data/ TFIDF_output_significant.to_pickle("../data/TFIDF_output_significant_all.pkl") # The amount of the most significant search results len(TFIDF_output_significant) get_top = 500 top_to_print = 10 # with open('../data/TFIDF_output_significant_all.pkl', 'rb') as fp: # TFIDF_output_significant = pickle.load(fp) with open("../data/df_kaggle_all_eng.pkl", "rb") as fp: df_covid_eng = pickle.load(fp) df_covid_eng.drop_duplicates(subset=["paper_id"], inplace=True) TFIDF_output_significant.drop_duplicates(subset=["Paper ID"], inplace=True) papers_to_embed = df_covid_eng.loc[ df_covid_eng["paper_id"].isin(TFIDF_output_significant["Paper ID"]) ].copy() sort_papers = ( TFIDF_output_significant.loc[ TFIDF_output_significant["Paper ID"].isin(papers_to_embed["paper_id"]) ] .sort_values(by="Score", ascending=False)["Paper ID"] .to_list() ) papers_to_embed = papers_to_embed.set_index("paper_id").loc[sort_papers].reset_index() tqdm.pandas(desc="Combining abstracts and body text") papers_to_embed["combined_text"] = papers_to_embed.progress_apply( lambda x: x["abstract"] + " " + x["body_text"], axis=1 ) tqdm.pandas(desc="Splitting abstracts into sentences") papers_to_embed["abstract_sentence"] = papers_to_embed["abstract"].progress_apply( tokenize.sent_tokenize ) tqdm.pandas(desc="Splitting papers into sentences") papers_to_embed["combined_text_sentence"] = papers_to_embed[ "combined_text" ].progress_apply(tokenize.sent_tokenize) embedder = SentenceTransformer("bert-base-nli-mean-tokens") sent_to_embed_abstr = list(itertools.chain(*papers_to_embed["abstract_sentence"])) sent_to_embed_comb = list( itertools.chain(*papers_to_embed["combined_text_sentence"].iloc[:get_top]) ) abstract_embed = np.array( embedder.encode(sent_to_embed_abstr, batch_size=64, show_progress_bar=True) ) comb_text_embed = np.array( embedder.encode(sent_to_embed_comb, batch_size=64, show_progress_bar=True) ) # save intermidiate data in case needed np.save("../data/abstr_data_encodings", abstract_embed) np.save("../data/comb_text_data_encodings", comb_text_embed) questions = [ ( "Evidence of animal infection with SARS-CoV-2 and its transmission to " "other hosts, including the spill-over to humans." 
) ] questions_embed = np.array( embedder.encode(questions, batch_size=64, show_progress_bar=True) ) similarity_abstr = cosine_similarity(abstract_embed, questions_embed).squeeze() similarity_comb = cosine_similarity(comb_text_embed, questions_embed).squeeze() sort_args_abstr = np.argsort(similarity_abstr)[::-1] sim_sort_abstr = similarity_abstr[sort_args_abstr] sort_args_comb = np.argsort(similarity_comb)[::-1] sim_sort_comb = similarity_comb[sort_args_comb] paper_id_abst = np.array( list( itertools.chain( *papers_to_embed.progress_apply( lambda x: [x["paper_id"]] * len(x["abstract_sentence"]), axis=1 ).tolist() ) ) ) paper_id_comb = np.array( list( itertools.chain( *papers_to_embed.iloc[:get_top] .progress_apply( lambda x: [x["paper_id"]] * len(x["combined_text_sentence"]), axis=1 ) .tolist() ) ) ) interest_paper_id_abstr = paper_id_abst[sort_args_abstr] interest_sentences_abstr = np.array(sent_to_embed_abstr)[sort_args_abstr] interest_abstracts = ( papers_to_embed.set_index("paper_id") .loc[interest_paper_id_abstr]["abstract"] .tolist() ) interest_paper_id_comb = paper_id_comb[sort_args_comb] interest_sentences_comb = np.array(sent_to_embed_comb)[sort_args_comb] interest_comb_text = ( papers_to_embed.set_index("paper_id") .loc[interest_paper_id_comb]["combined_text"] .tolist() ) with open("interesting_papers_based_on_abstract.txt", "w") as f: for paper, sent, abst, metric in zip( interest_paper_id_abstr, interest_sentences_abstr, interest_abstracts, sim_sort_abstr, ): _ = f.write("Paper ID: " + paper + "\n") _ = f.write("Important sentence: " + sent + "\n") # _ = f.write('Associated abstract: ' + abst + '\n') _ = f.write("Cosine Similarity metric: " + "{0:.3f}".format(metric) + "\n") _ = f.write("\n") with open("interesting_papers_based_on_comb_text.txt", "w") as f: for paper, sent, comb_text, metric in zip( interest_paper_id_comb, interest_sentences_comb, interest_comb_text, sim_sort_comb, ): _ = f.write("Paper ID: " + paper + "\n") _ = f.write("Important sentence: " + sent + "\n") _ = f.write("Cosine Similarity metric: " + "{0:.3f}".format(metric) + "\n") # _ = f.write('Associated body text: ' + comb_text + '\n') _ = f.write("\n") print("Results based on abstract:") print('"""') with open("interesting_papers_based_on_abstract.txt", "r") as f: print("\n".join(f.read().splitlines()[: 4 * top_to_print])) print('"""') print("") print("Results based on abstract and body text:") print('"""') with open("interesting_papers_based_on_comb_text.txt", "r") as f: print("\n".join(f.read().splitlines()[: 4 * top_to_print])) print('"""') rows_to_sample = np.random.randint(len(comb_text_embed), size=1000) sentences_subset = np.array(sent_to_embed_comb)[rows_to_sample].tolist() embeddings_subset = comb_text_embed[rows_to_sample] # Perform kmean clustering num_clusters = 5 clustering_model = KMeans(n_clusters=num_clusters) _ = clustering_model.fit(embeddings_subset) cluster_assignment = clustering_model.labels_ clustered_sentences = [[] for i in range(num_clusters)] for sentence_id, cluster_id in enumerate(cluster_assignment): clustered_sentences[cluster_id].append(sentences_subset[sentence_id]) for i, cluster in enumerate(clustered_sentences): print("Cluster ", i + 1) print(cluster[:10]) print("") file_to_read = "./interesting_papers_based_on_comb_text.txt" content = None with open(file_to_read) as f: content = f.readlines() content = [x.strip() for x in content] content = [string for string in content if string != ""] top_results = content[0:100] # Select the first n elements. 
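# Optional aside (a minimal sketch, not used below): the cosine ranking above scores
# individual sentences. If a per-paper ranking is preferred, one simple option is to take
# the maximum sentence score within each paper. This assumes `similarity_abstr` and
# `paper_id_abst` exactly as computed earlier in this notebook.
paper_scores = (
    pd.DataFrame({"paper_id": paper_id_abst, "score": similarity_abstr})
    .groupby("paper_id")["score"]
    .max()
    .sort_values(ascending=False)
)
paper_scores.head(10)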
selected_top_sentences = [] for elem in top_results: if elem.startswith("Important sentence:"): selected_top_sentences.append(elem.replace("Important sentence:", "").strip()) # Select the first n sentences. selected_top_sentences = selected_top_sentences[0:20] # Some settings for the plot. pd.set_option("display.max_colwidth", 200) # The main idea is to go through a sentence and extract the subject and the object # as and when they are encountered. def get_entities(sent): ## chunk 1 ent1 = "" ent2 = "" prv_tok_dep = "" # dependency tag of previous token in the sentence prv_tok_text = "" # previous token in the sentence prefix = "" modifier = "" for tok in nlp(sent): ## chunk 2 # if token is a punctuation mark then move on to the next token if tok.dep_ != "punct": # check: token is a compound word or not if tok.dep_ == "compound": prefix = tok.text # if the previous word was also a 'compound' then add the current word to it if prv_tok_dep == "compound": prefix = prv_tok_text + " " + tok.text # check: token is a modifier or not if tok.dep_.endswith("mod") == True: modifier = tok.text # if the previous word was also a 'compound' then add the current word to it if prv_tok_dep == "compound": modifier = prv_tok_text + " " + tok.text ## chunk 3 if tok.dep_.find("subj") == True: ent1 = modifier + " " + prefix + " " + tok.text prefix = "" modifier = "" prv_tok_dep = "" prv_tok_text = "" ## chunk 4 if tok.dep_.find("obj") == True: ent2 = modifier + " " + prefix + " " + tok.text ## chunk 5 # update variables prv_tok_dep = tok.dep_ prv_tok_text = tok.text return [ent1.strip(), ent2.strip()] # Relation/Predicate Extraction. # The hypothesis is that the predicate is actually the main verb in a sentence. def get_relation(sent): doc = nlp(sent) # Matcher class object matcher = Matcher(nlp.vocab) # define the pattern pattern = [ {"DEP": "ROOT"}, {"DEP": "prep", "OP": "?"}, {"DEP": "agent", "OP": "?"}, {"POS": "ADJ", "OP": "?"}, ] matcher.add("matching_1", None, pattern) matches = matcher(doc) k = len(matches) - 1 span = doc[matches[k][1] : matches[k][2]] return span.text entity_pairs = [] for i in tqdm(selected_top_sentences): entity_pairs.append(get_entities(i)) # extract relationship relations = [get_relation(i) for i in tqdm(selected_top_sentences)] print(relations) # extract subject source = [i[0] for i in entity_pairs] print(source) # extract object target = [i[1] for i in entity_pairs] print(target) kg_df = pd.DataFrame({"source": source, "target": target, "edge": relations}) # Use the library networkx to build graph. # Create a directed graph from the dataframe first. sentence_graph = nx.from_pandas_edgelist( kg_df, "source", "target", edge_attr=True, create_using=nx.MultiDiGraph() ) plt.figure( figsize=(12, 9) ) # Change this to make plotting area for knowledge graph bigger or smaller. # For parameters of spring_layout, see # https://networkx.github.io/documentation/stable/reference/generated/networkx.drawing.layout.spring_layout.html # pos = nx.spring_layout(sentence_graph) pos = nx.spring_layout( sentence_graph, k=1.3, iterations=100, fixed=None, center=(0, 0), scale=4 ) nx.draw( sentence_graph, with_labels=True, node_color="skyblue", edge_cmap=plt.cm.Blues, pos=pos, ) plt.show()
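# Quick, illustrative sanity check of the helpers above on a made-up sentence (the exact
# spans returned depend on the spaCy model and version loaded into `nlp` earlier, so the
# outputs are indicative rather than guaranteed). Note that `tok.dep_.find("subj") == True`
# in get_entities only matches dependency labels where "subj" starts at index 1 (e.g.
# "nsubj", "csubj"), and likewise "obj" matches labels such as "dobj" and "pobj"; this is
# how the original tutorial code behaves.
example_sentence = "SARS-CoV-2 infects ferrets in laboratory experiments"
print(get_entities(example_sentence))  # roughly a [subject, object] pair
print(get_relation(example_sentence))  # roughly the main verb / predicate span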
false
0
8,846
0
9,864
8,846
69433868
# このノートブックでは、Optiver Realized Volatility PredictionについてのEDA・日本語での内容理解を試みます。 # 内容は随時追加予定です。 # 参考にさせていただきました) # https://www.kaggle.com/chumajin/optiver-realized-eda-for-starter-version # 基本的には自分のメモですが、コメント・ご指摘など大歓迎です! # ## コンペ概要(Overviewの翻訳など) # You will have hundreds of millions of rows of highly granular financial data at your fingertips, with which you'll design your model forecasting volatility over 10-minute periods. # → ビッグデータから10分間のボラティリティを予測するモデルを設計します。 # ボラティリティって? # → 価格変動のようです。urlはボラティリティの説明。 # https://www.smbcnikko.co.jp/terms/japan/ho/J0280.html # チュートリアルノートブックがあるよ # → こちら # https://www.kaggle.com/jiashenliu/introduction-to-financial-concepts-and-data # 評価方法は? # → RMEPS:二乗平均平方根パーセント誤差 # ## データ # **テストセットの最初の数行のみをダウンロードできるコード競争です。** # → 提出ファイルは三行だけですが、実際はたくさんデータがあるようです(テストデータの最初の三行だけを読み込んでいる。) # 以下はgoogle翻訳べた張り # ### book_ [train / test] .parquet # 区切られた寄木細工のファイルstock_id。市場に投入された最も競争力のある売買注文に関するオーダーブックデータを提供します。本の上位2つのレベルが共有されます。本の第1レベルは価格面でより競争力があり、第2レベルよりも実行が優先されます。 # * ```stock_id``` -株式のIDコード。すべてのストックIDがすべてのタイムバケットに存在するわけではありません。Parquetは、ロード時にこの列をカテゴリデータ型に強制します。あなたはそれをint8に変換したいかもしれません。 # * ```time_id``` -タイムバケットのIDコード。時間IDは必ずしも連続している必要はありませんが、すべての在庫で一貫しています。 # * ```seconds_in_bucket``` -バケットの開始からの秒数。常に0から始まります。 # * ```bid_price[1/2]``` -最も競争力のある購入レベルの正規化された価格。 # * ```ask_price[1/2]``` -最も競争力のある販売レベルの正規化された価格。 # * ```bid_size[1/2]``` -最も競争力のある購入レベルでの株式数。 # * ```ask_size[1/2]``` -最も競争力のある/ 2番目に競争力のある販売レベルの株式数。 # ### trade_[train/test].parquet # 区切られた寄木細工のファイルstock_id。実際に実行された取引に関するデータが含まれています。通常、市場では、実際の取引よりも受動的な売買意図の更新(本の更新)が多いため、このファイルは注文書よりもまばらであると予想される場合があります。 # * ```stock_id``` - 同上。 # * ```time_id``` - 同上。 # * ```seconds_in_bucket```- 同上。トレードデータとブックデータは同じ時間枠から取得され、トレードデータは一般にまばらであるため、このフィールドは必ずしも0から始まるとは限らないことに注意してください。 # * ```price``` -1秒間に発生する実行済みトランザクションの平均価格。価格は正規化されており、平均は各取引で取引された株式数によって加重されています。 # * ```size``` -取引された株式の総数。 # * ```order_count``` -発生している固有の取引注文の数。 # ### train.csv # * ```stock_id``` -上記と同じですが、これはcsvであるため、列はカテゴリではなく整数として読み込まれます。 # * ```time_id``` - 同上。 # * ```target``` -同じstock / time_idでの特徴データに続いて、10分間のウィンドウで計算された実現ボラティリティ。フィーチャデータとターゲットデータの間に重複はありません。詳細については、チュートリアルノートブックをご覧ください。 # ### test.csv # 他のデータファイルと送信ファイルの間のマッピングを提供します。他のテストファイルと同様に、ほとんどのデータは、送信時にノートブックでのみ利用可能であり、最初の数行のみがダウンロード可能です。 # * ```stock_id``` - 同上。 # * ```time_id``` - 同上。 # * ```row_id``` -送信行の一意の識別子。既存の時間ID /在庫IDのペアごとに1つの行があります。各時間枠には、必ずしもすべての個別の株式が含まれているわけではありません。 # ### sample_submission.csv # 正しい形式のサンプル送信ファイル。 # * ```row_id``` -test.csvと同じです。 # * ```target``` -train.csvと同じ定義。ベンチマークは、train.csvのターゲット値の中央値を使用しています。 # ## コンテスト Q&A (抜粋) # https://www.kaggle.com/c/optiver-realized-volatility-prediction/discussion/249752 # 同じstock_idは、すべての競合データセットの同じ株式を表していますか? 
# → Yes # time_idについて # → time_idはランダムにシャッフルされるため、異なるデータセット間のブリッジとして機能する以外の情報は含まれないことに注意してください;つまり、time_idは時系列データとはならないようです。 # 3行しかテストデータがないよ # → コードをコミットすると、コードはバックグラウンドで設定された実際のテストに対して実行され、リーダーボードが更新されます。(裏にデータがある) # ## 金融の基礎知識;キーワードなど # https://www.kaggle.com/jiashenliu/introduction-to-financial-concepts-and-data # などを参考にしましています # #### WAP; Weighted averaged price # ノートブックを見ていると、重要な特徴量としてよく出てきます。こんな感じで実装されがち # ``` # def calc_wap(df): # wap = (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1'])/(df['bid_size1'] + df['ask_size1']) # ``` # オーダーブックから計算されます。日本語記事(Volume Weighted Average Price)はこちら # https://oneinvest.jp/vwap/ # ``` # つまり、VWAP(その日の平均約定価格)を知ることができれば、VWAP価格で株を買えば少なくともその日に高値掴みすることはなくなり、平均的な価格で取引ができることになります。 # ``` # とのことです。 # ## データセットの確認 import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os # 基本ライブラリの読み込み import matplotlib.pyplot as plt import seaborn as sns # ざっくりとしたコンペの内容は以下の通り。 # * 各銘柄(stock_id)で、特定の時間区切り(time_id)の10分間の時系列データ(seconds_in_bucket)があり、その時間区切りでの価格変動の大小についてボラティリティ(今回のtarget)として評価している。 # * ~~ある銘柄(stock_id=0)の、とある時間区切り(time_id=4, 32, 34)のボラティリティを予測しよう。ただし、time_id=4は初めの少しだけしか時系列データがないし、time_id=32, 34にはデータがないよ~~ # → 上訂正:裏にデータを隠していて、今回作ったモデルでいろんなデータのボラティリティを測定するよ! # インプットデータの中身を確認します。 # /optiver-realized-volatility-predictionに格納されています。今回のコンペで読み込むデータは以下の6つ。 # * book_test.parquet # * book_train.parquet # * trade_test.parquet # * trade_train.parquet # * test.csv # * train.csv # trainとなるbook_test.parquet(trade_test.paruet)には'stock_id=0'のデータのみが格納されているのに対して、testとなるbook_train.parquet(trade_train.paruet)には種々のstock_idのデータが格納されています。なお、stock_idはある特定の銘柄のようです。 print("***") print("book_test.parquet") print("***") print("book_train.parquet") # stock_id=0について、bookのtestとtrainの中身を確認します。 # 基本的には同じデータが入っており、違いはtime_idのみのようです。あと、testデータはtime_id=4のデータで3行のseconds_in_bucketのみ(裏にテストデータが隠されている)。 book_testparquet = pd.read_parquet( "../input/optiver-realized-volatility-prediction/book_test.parquet/stock_id=0" ) book_testparquet book_trainparquet = pd.read_parquet( "../input/optiver-realized-volatility-prediction/book_train.parquet/stock_id=0" ) book_trainparquet # 1000行までのデータを可視化すると、それぞれのtime_id(銘柄)でseconds_in_bucket(時刻)が純増している様子が見れます。 # time_idごとにデータ数(行数)は違うようです(min73-max549)が、seconds_in_bucketは0から始まって600で終了するようです(1秒データが600で10分間)。 # barplotと統計データを見ると、time_idごとのデータ数は平均で200くらいのようです。 plt.plot(np.array(book_trainparquet["time_id"][:1000])) plt.show() plt.plot(np.array(book_trainparquet["seconds_in_bucket"][:1000])) plt.show() print(book_trainparquet["time_id"].value_counts()) print() print(book_trainparquet["time_id"].value_counts().describe()) sns.countplot(book_trainparquet["time_id"]) ax = plt.gca() ax.axes.xaxis.set_ticks([]) plt.show() # time_idが5のデータ(銘柄)について、推移を確認します。seconds_in_bucketを時刻として採用しています。 book_trainparquet[book_trainparquet["time_id"] == 5] book_data = book_trainparquet[book_trainparquet["time_id"] == 5] plt.figure(figsize=(20, 6)) for i, col in enumerate( [ "bid_price1", "ask_price1", "bid_price2", "ask_price2", ] ): plt.plot(book_data["seconds_in_bucket"], book_data[col], "x-", label=col) plt.legend() plt.show() plt.figure(figsize=(20, 6)) for i, col in enumerate( [ "bid_size1", "ask_size1", "bid_size2", "ask_size2", ] ): plt.plot(book_data["seconds_in_bucket"], book_data[col], label=col) plt.legend() plt.show() # bookに続いてtradeのデータについても確認します。 trade_testparquet = pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_test.parquet/stock_id=0" ) trade_testparquet trade_trainparquet = 
pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=0" ) trade_trainparquet # bookと同じく1000行までのデータを可視化すると、time_id,seconds_in_bucketの推移は似てはいますが、tradeのデータは急峻であることがわかります。 # time_idごとのデータ数は平均で30(min2-max120)くらいのようです。すなわち、tradeのデータは非常にスパース plt.plot(np.array(trade_trainparquet["time_id"][:1000])) plt.show() plt.plot(np.array(trade_trainparquet["seconds_in_bucket"][:1000])) plt.show() print(trade_trainparquet["time_id"].value_counts()) print() print(trade_trainparquet["time_id"].value_counts().describe()) sns.countplot(trade_trainparquet["time_id"]) ax = plt.gca() ax.axes.xaxis.set_ticks([]) plt.show() trade_trainparquet[trade_trainparquet["time_id"] == 5] # trade_dataに関してtime_idが5のデータ(銘柄)について、推移を確認します。seconds_in_bucketを時刻として採用しています。 # 合わせて、book_dataとtrade_dataを合わせて可視化します。trade_dataの'price'がbookデータのbid,askの範囲内で変動していることがわかります。 trade_data = trade_trainparquet[trade_trainparquet["time_id"] == 5] plt.figure(figsize=(20, 6)) for i, col in enumerate( [ "price", ] ): plt.plot(trade_data["seconds_in_bucket"], trade_data[col], "x-", label=col) plt.legend() plt.show() plt.figure(figsize=(20, 6)) for i, col in enumerate( [ "size", "order_count", ] ): plt.plot(trade_data["seconds_in_bucket"], trade_data[col], label=col) plt.legend() plt.show() plt.figure(figsize=(20, 6)) for i, col in enumerate( [ "bid_price1", "ask_price1", "bid_price2", "ask_price2", ] ): plt.plot(book_data["seconds_in_bucket"], book_data[col], "x-", label=col) plt.plot(trade_data["seconds_in_bucket"], trade_data["price"], "o--", lw=3, label=col) plt.legend() plt.show() # 今回の目的変数となるtarget(=ボラティリティ)についてです。 # ボラティリティは価格変動の度合いのようです。 # https://www.smbcnikko.co.jp/terms/japan/ho/J0280.html # ~~testデータを読み込みます。今回のコンペではここの各行についてtargetを計算せよとのこと(つまり、submissionのデータは三行)~~ # ~~ここで振り返ると、stock_id=0に関しては、time_id=4のデータ(bookもtradeも)しかないので、time_id=32, 34の予測には工夫が必要なようです。~~ test = pd.read_csv("../input/optiver-realized-volatility-prediction/test.csv") test book_testparquet trade_testparquet # trainに関しては、targetはstock_id(銘柄)ごとにtime_id, targetが設定されています。 train = pd.read_csv("../input/optiver-realized-volatility-prediction/train.csv") train # trainのターゲットの値を分析します。統計量、分布については以下。 print(train["target"].describe()) # sns.displot(train["target"]) plt.show() # 始めの10000行データを出力しました。trainのデータはかなり密に入っていることがわかります。 plt.plot(train[:10000]["stock_id"]) plt.show() # plt.plot(train[:10000]["time_id"]) plt.show() # plt.plot(train[:10000]["target"]) plt.show() train[train["stock_id"] == 1] # stock_id=0,1,2の三銘柄について、time_idでの推移(便宜的に)を見ますと、なんとなく関連性があり、同じタイミング(time_id)でtargetが大きくなっているようにも見受けられます。 # ~~このあたりの関係性をもって、stock_id=0のtime_id=32, 34のtargetを予測することになりそうです。ただし、time_id=32, 34の直接的なデータはないようです。近いところだと、time_id=31と62。~~ # → 提供されているデータは実際のテストデータとは違うため、この辺は気にしなくてよさそう。 for tt in [ train[train["stock_id"] == 0], train[train["stock_id"] == 1], train[train["stock_id"] == 2], ]: plt.plot(tt[:100]["time_id"], tt[:100]["target"]) plt.xlabel("time_id") plt.ylabel("target") plt.show() # ## EDA(簡易); stock_idでの分析 # stock_idごとにtargetの分布に差異があるようです。 規格化すればおよそ同じには見える、、、? 
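# Note: the next cell uses sklearn's PowerTransformer (Yeo-Johnson by default, with
# standardization) to map each stock's target distribution closer to a Gaussian, so the
# per-stock shapes can be compared on a common scale.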
# from sklearn.preprocessing import PowerTransformer for id in [1, 2, 10, 30, 100]: # xx = np.array(train[train["stock_id"] == id]["target"]) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) plt.xlim([0, 0.04]) sns.distplot(xx, ax=ax1) # plt.xlim([-3, 3]) scaler = PowerTransformer() sns.distplot(scaler.fit_transform(xx.reshape(-1, 1)), ax=ax2) # plt.show() # stock_idごとにtargetの統計量を算出します。 stock = ( train.groupby("stock_id")["target"] .agg(["mean", "median", "std", "count", "sum"]) .reset_index() ) stock stock.describe() for i in ["mean", "median", "std"]: sns.displot(stock[i]) plt.show() # 一応、銘柄ごとにボラティリティの大きさに違いがあるようなので、銘柄の統計量情報だけでも少しだけ予測できそう。 # → 銘柄のボラティリティの平均値('mean')が大きいほどボラティリティが大きい傾向にある(ほんの少し)。 train_info = train.copy() train_info["mean"] = train["stock_id"].map(dict(zip(stock["stock_id"], stock["mean"]))) train_info["median"] = train["stock_id"].map( dict(zip(stock["stock_id"], stock["median"])) ) train_info["std"] = train["stock_id"].map(dict(zip(stock["stock_id"], stock["std"]))) train_info # https://pythondatascience.plavox.info/seaborn/%E6%95%A3%E5%B8%83%E5%9B%B3%E3%83%BB%E5%9B%9E%E5%B8%B0%E3%83%A2%E3%83%87%E3%83%AB from sklearn.metrics import r2_score # for col in ["mean", "median", "std"]: # xx = train_info[col] yy = train_info["target"] # print(col) print("r2 score:", r2_score(xx, yy)) # plt.plot(xx, yy, "x") # coef = np.polyfit(xx, yy, 1) y_pred = coef[0] * xx + coef[1] print("fit 1d:", coef[0], "x+", coef[1]) # plt.plot(y_pred, xx, "k-") # plt.show() stock # stockごとに統計量がどういった形をしているかを改めて確認 plt.plot(np.sort(np.array(stock["mean"])), "x") plt.show() plt.plot(np.sort(np.array(stock["median"])), "x") plt.show() plt.plot(np.sort(np.array(stock["std"])), "x") plt.show() plt.figure(figsize=(5, 5)) plt.plot(stock["mean"], stock["median"], "o") plt.show() plt.figure(figsize=(5, 5)) plt.plot(stock["mean"], stock["std"], "o") plt.show() plt.figure(figsize=(5, 5)) plt.plot(stock["median"], stock["std"], "o") plt.show() trade_trainparquet # stock_idごとにvoalitityとprice,size,order_countの関係の確認 import glob ld = glob.glob("../input/optiver-realized-volatility-prediction/book_train.parquet/*") list_price = [] list_size = [] list_order_count = [] for i in stock["stock_id"]: list_price.append( np.mean( pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=" + str(i) )["price"] ) ) list_size.append( np.mean( pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=" + str(i) )["size"] ) ) list_order_count.append( np.mean( pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=" + str(i) )["order_count"] ) ) # labels = ["price", "size", "order_count"] for i, l in enumerate([list_price, list_size, list_order_count]): # plt.figure(figsize=(5, 5)) plt.plot(stock["mean"], l, "o") plt.ylabel(labels[i]) plt.xlabel("mean volatility") plt.show() pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=" + str(i) ) book_trainparquet_list.append() from sklearn.cluster import KMeans nc = 5 cust_array = stock.iloc[:, 1:4] labels = KMeans(n_clusters=nc).fit_predict(cust_array) plt.figure(figsize=(5, 5)) for i in range(nc): xx = stock["mean"][labels == i] yy = stock["std"][labels == i] plt.plot(xx, yy, "o") plt.show() # ## 簡易モデルの作成(線形回帰) # おためしで、統計量(median)から回帰 # →なお、スコアは全然ダメダメです。 xcol = "median" y = np.array(train_info["target"]) X = np.array(train_info[xcol]).reshape(-1, 1) from sklearn.linear_model import LinearRegression from sklearn.linear_model 
import Lasso from sklearn.svm import SVR from sklearn.metrics import r2_score from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import PowerTransformer scaler = StandardScaler() # scaler = PowerTransformer() scaler.fit(X) clf = LinearRegression() # clf = Lasso() # clf = SVR() clf.fit(scaler.transform(X), y) y_pred = clf.predict(scaler.transform(X)) plt.figure(figsize=(4, 4)) plt.plot(y, y_pred, "x") min = np.min(np.array(np.min(y), np.min(y_pred))) max = np.max(np.array(np.max(y), np.max(y_pred))) # plt.plot([0.9 * min, 1.1 * max], [0.9 * min, 1.1 * max], "k-") plt.xlabel("Actual") plt.ylabel("Estimated") print(r2_score(y, y_pred)) plt.show() # ## 一旦ここで提出(Score 0.98) # 提出条件の確認のため、上の雑なモデルで一旦Submissionしてみます。testデータから作成します。stock_idを読み込んで、対応するstock_idのmeanを読み取り、そこから回帰式で予測値を返す簡易プログラムになっています。 # これでも無事、提出されるようです;スコアはさんざん(0.98)ですが、、、 # なお、下手に回帰するよりも'median'をそのまま入れる方がスコアが高いよう(0.68)です。→targetを差分にした方が扱いやすい?? # !注意 インターネット接続は切ること test X_test = np.array( test["stock_id"].map(dict(zip(stock["stock_id"], stock[xcol]))) ).reshape(-1, 1) X_test # y_pred = X_test y_pred = clf.predict(scaler.transform(X_test)) y_pred sub = test.copy() sub["target"] = y_pred sub = sub.drop("stock_id", axis=1) sub = sub.drop("time_id", axis=1) sub sub.to_csv("submission.csv", index=False) # ## スコア # 今回のスコアはrmspeです。ので、準備。 # def rmspe(y_true, y_pred): return np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))) # # print(rmspe(y,y_pred)) # ## EDA(簡易); time_idでの分析 # time_idごとにボラティリティの違いがありそうですね(つまり、相場が大きく変動するタイミングは銘柄全体で変動が大きい) # → time_id(の統計量)を説明変数に入れる意味はありそう。 train["time_id"].describe() fig, ax = plt.subplots(1, 5, figsize=(20, 4)) for i, id in enumerate([5, 11, 16, 31, 62]): # # plt.xlim([0,0.05]) sns.distplot(train[train["time_id"] == id]["target"], ax=ax[i]) plt.show() time = ( train.groupby("time_id")["target"] .agg(["mean", "median", "std", "count", "sum"]) .reset_index() ) time time.describe() for i in ["mean", "median", "std"]: sns.displot(time[i]) plt.show() train_info = train.copy() train_info["mean"] = train["time_id"].map(dict(zip(time["time_id"], time["mean"]))) train_info["median"] = train["time_id"].map(dict(zip(time["time_id"], time["median"]))) train_info["std"] = train["time_id"].map(dict(zip(time["time_id"], time["std"]))) train_info from sklearn.metrics import r2_score # for col in ["mean", "median", "std"]: # xx = train_info[col] yy = train_info["target"] # print(col) print("r2 score:", r2_score(xx, yy)) # plt.plot(xx, yy, "x") # coef = np.polyfit(xx, yy, 1) y_pred = coef[0] * xx + coef[1] print("fit 1d:", coef[0], "x+", coef[1]) # plt.plot(y_pred, xx, "k-") # plt.show() # ## EDA; book/tradeデータ # 各銘柄(stock_idの推移を比較します) train_list = [ 0, 31, stock["stock_id"][stock["mean"].idxmax()], # ボラティリティの大きな銘柄 stock["stock_id"][stock["mean"].idxmin()], # ボラティリティの小さな銘柄 ] book_trainparquet_list = [] trade_trainparquet_list = [] for i in train_list: book_trainparquet_list.append( pd.read_parquet( "../input/optiver-realized-volatility-prediction/book_train.parquet/stock_id=" + str(i) ) ) trade_trainparquet_list.append( pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=" + str(i) ) ) train_list for t, tt in enumerate( [5, time["time_id"][time["mean"].idxmin()], time["time_id"][time["mean"].idxmax()]] ): # plt.figure(figsize=(20, 5)) # print("time_id == ", tt) # for i, trade_t in enumerate(trade_trainparquet_list): # trade_data = trade_t[trade_t["time_id"] == tt] plt.plot( trade_data["seconds_in_bucket"], trade_data["price"], "o--", label="stock_id=" + 
str(train_list[i]), ) plt.legend() plt.show() # 時系列データをヒートマップとして可視化 # 暫定で stock_id=1, time_id=5で規格化 from sklearn.preprocessing import StandardScaler i = 3 trade_data = trade_trainparquet_list[i][trade_trainparquet_list[i]["time_id"] == 5] scaler = StandardScaler() scaler.fit(trade_data.iloc[:, 2:]) for i, trade_t in enumerate(trade_trainparquet_list): # print("stock_id=" + str(train_list[i])) trade_data = trade_t[trade_t["time_id"] == 5] # td = scaler.transform(trade_data.iloc[:, 2:]) # heat = np.zeros([3, 600]) heat[:, :] = np.nan # for i, sec in enumerate(trade_data["seconds_in_bucket"]): heat[0, sec] = td[i, 0] heat[1, sec] = td[i, 1] heat[2, sec] = td[i, 2] # heat = pd.DataFrame(heat.T) heat = heat.interpolate() # plt.figure(figsize=(20, 3)) sns.heatmap(heat.T) plt.show() # ## モデル(簡易)の構築と提出(Score 0.52) # stock_id, time_idの統計量を使ってtargetの回帰を行います。 # なお、xgboostが一番成績が良かったので、こいつで提出したらPublic Scoreは0.52071でした。 train_info = train.copy() train_info["stock_id_mean"] = train["stock_id"].map( dict(zip(stock["stock_id"], stock["mean"])) ) train_info["stock_id_median"] = train["stock_id"].map( dict(zip(stock["stock_id"], stock["median"])) ) train_info["stock_id_std"] = train["stock_id"].map( dict(zip(stock["stock_id"], stock["std"])) ) train_info["time_id_mean"] = train["time_id"].map( dict(zip(time["time_id"], time["mean"])) ) train_info["time_id_median"] = train["time_id"].map( dict(zip(time["time_id"], time["median"])) ) train_info["time_id_std"] = train["time_id"].map( dict(zip(time["time_id"], time["std"])) ) train_info from sklearn.model_selection import KFold kf = KFold(n_splits=5, random_state=12345678, shuffle=True) y = np.array(train["target"]) X = np.array(train_info.iloc[:, 3:]) # LinearRegression,Lasso,Ridge,SVR,XGBRegressorを比較 # XGBRegressorの成績が良い(ハイパーパラメータチューニングはしていない)。 from sklearn.linear_model import LinearRegression from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge from sklearn.svm import SVR import xgboost as xgb from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import PowerTransformer scaler = StandardScaler() # scaler = PowerTransformer() scaler.fit(X) models = [LinearRegression(), Lasso(), Ridge(), SVR(), xgb.XGBRegressor()] scores = [] for clf in models: scores = [] print(clf) plt.figure(figsize=(4, 4)) for train_index, test_index in kf.split(X, y): # X_train = scaler.transform(X[train_index]) y_train = y[train_index] # X_test = scaler.transform(X[test_index]) y_test = y[test_index] # clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # plt.plot(y_pred, y_test, "x") # scores.append((rmspe(y_test, y_pred))) # print(np.mean(np.array(scores))) # min = np.min(np.array(np.min(y_test), np.min(y_pred))) max = np.max(np.array(np.max(y_test), np.max(y_pred))) # plt.plot([0.9 * min, 1.1 * max], [0.9 * min, 1.1 * max], "k-") # plt.show() # XGBRegressorでSubmissionを作成します。 test_info = test.copy() test_info["stock_id_mean"] = test["stock_id"].map( dict(zip(stock["stock_id"], stock["mean"])) ) test_info["stock_id_median"] = test["stock_id"].map( dict(zip(stock["stock_id"], stock["median"])) ) test_info["stock_id_std"] = test["stock_id"].map( dict(zip(stock["stock_id"], stock["std"])) ) test_info["time_id_mean"] = test["time_id"].map( dict(zip(time["time_id"], time["mean"])) ) test_info["time_id_median"] = test["time_id"].map( dict(zip(time["time_id"], time["median"])) ) test_info["time_id_std"] = test["time_id"].map(dict(zip(time["time_id"], time["std"]))) test_info clf = models[4] print(clf) X_test = np.array(test_info.iloc[:, 3:]) 
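# Fit the selected XGBRegressor on the full scaled training features and predict the
# hidden test rows for the submission file.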
clf.fit(scaler.transform(X), y) y_pred = clf.predict(scaler.transform(X_test)) y_pred sub = test.copy() sub["target"] = y_pred sub = sub.drop("stock_id", axis=1) sub = sub.drop("time_id", axis=1) sub sub.to_csv("submission.csv", index=False) # ## モデル(ハイパーパラメータチューニング)の構築と提出 # optunaを使ってチューニングします。下敷きはこちら # https://www.kaggle.com/matsuosan/japanese-xgb-lgbm-voting-optuna-boston import optuna y = np.array(train["target"]) X = np.array(train_info.iloc[:, 3:]) train_info test_info # xgboostの最適化を選択します。 from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import PowerTransformer scaler = StandardScaler() # scaler = PowerTransformer() scaler.fit(X) # ``` # <bound method XGBModel.get_params of XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1, # colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1, # importance_type='gain', interaction_constraints='', # learning_rate=0.300000012, max_delta_step=0, max_depth=6, # min_child_weight=1, missing=nan, monotone_constraints='()', # n_estimators=100, n_jobs=4, num_parallel_tree=1, random_state=0, # reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1, # tree_method='exact', validate_parameters=1, verbosity=None)> # ``` import xgboost as xgb def objective_xgb(trial): # if trial.number == 0: learning_rate = trial.suggest_loguniform("learning_rate", 0.3, 0.3) gamma = trial.suggest_loguniform("gamma", 1e-8, 1e-8) max_depth = trial.suggest_int("max_depth", 6, 6) min_child_weight = trial.suggest_loguniform("min_child_weight", 1.0, 1.0) # max_delta_step = trial.suggest_uniform('max_delta_step', 1e-10, 1e-10) subsample = trial.suggest_uniform("subsample", 1.0, 1.0) reg_lambda = trial.suggest_uniform("reg_lambda", 1.0, 1.0) reg_alpha = trial.suggest_uniform("reg_alpha", 0.0, 0.0) else: learning_rate = trial.suggest_loguniform("learning_rate", 1e-8, 1.0) gamma = trial.suggest_loguniform("gamma", 1e-15, 1e-5) max_depth = trial.suggest_int("max_depth", 1, 20) min_child_weight = trial.suggest_loguniform("min_child_weight", 1e-8, 1e3) # max_delta_step = trial.suggest_uniform('max_delta_step', 0, 1.0) subsample = trial.suggest_uniform("subsample", 0.0, 1.0) reg_lambda = trial.suggest_uniform("reg_lambda", 0.0, 1000.0) reg_alpha = trial.suggest_uniform("reg_alpha", 0.0, 1000.0) # reg_alpha = trial.suggest_loguniform('reg_alpha', 1e-15, 1e4) # clf = xgb.XGBRegressor( learning_rate=learning_rate, subsample=subsample, max_depth=max_depth, min_child_weight=min_child_weight, max_delta_step=0, # 1e-10で発散したため、0で固定 reg_lambda=reg_lambda, gamma=gamma, reg_alpha=reg_alpha, # objective='reg:squarederror' ) scores = [] for train_index, test_index in kf.split(X, y): # X_train = scaler.transform(X[train_index]) y_train = y[train_index] # X_test = scaler.transform(X[test_index]) y_test = y[test_index] # clf.fit(X_train, y_train) # y_pred = clf.predict(X_test) # scores.append((rmspe(y_test, y_pred))) # return np.mean(np.array(scores)) # optunaの試行数はtimeoutで時間で設定 optuna.logging.disable_default_handler() # Optunaの出力を抑制する # optuna.logging.enable_default_handler() # Optunaで出力する # n_trials = 5 # # optuna study = optuna.create_study() # study.optimize(objective_xgb, n_trials=n_trials) # study.optimize(objective_xgb, timeout=60*2) # study.optimize(objective_xgb, timeout=3600*5) print("best_params") print(study.best_params) print("best_value") print(study.best_value) print("best_trial") print(study.best_trial) study_score = np.array([x.value for x in study.trials[:]]) plt.figure(figsize=(30, 6)) plt.plot(study_score, "o") plt.yscale("log") 
plt.plot([0, len(study_score)], [study_score[0], study_score[0]], "k-") plt.show() plt.figure(figsize=(30, 6)) plt.plot(np.sort(study_score), "x-") plt.yscale("log") plt.plot([0, len(study_score)], [study_score[0], study_score[0]], "k-") plt.show() df_study = study.trials_dataframe() df_study.to_csv("optuna.csv") df_study sort = np.argsort(np.array(df_study["value"])) df_study.sort_values("value") for col in df_study.columns[5:-1]: print(col) plt.figure(figsize=(30, 6)) plt.plot(df_study[col], "o") plt.show() plt.figure(figsize=(30, 6)) plt.plot(np.array(df_study.sort_values("value")[col]), "o") plt.show() import pickle clf = xgb.XGBRegressor( **study.best_params, # objective='reg:squarederror' ) with open("model.pickle", mode="wb") as fp: pickle.dump(clf, fp) print(clf) clf.fit(scaler.transform(X), y) y_pred = clf.predict(scaler.transform(X)) plt.figure(figsize=(4, 4)) plt.plot(y_pred, y, "x") # print(np.mean(np.array((rmspe(y, y_pred))))) # min = np.min(np.array(np.min(y_test), np.min(y_pred))) max = np.max(np.array(np.max(y_test), np.max(y_pred))) # plt.plot([0.9 * min, 1.1 * max], [0.9 * min, 1.1 * max], "k-") # plt.show() plt.barh(train_info.columns[3:][::-1], clf.feature_importances_[::-1]) X_test = np.array(test_info.iloc[:, 3:]) y_pred = clf.predict(scaler.transform(X_test)) y_pred sub = test.copy() sub["target"] = y_pred sub = sub.drop("stock_id", axis=1) sub = sub.drop("time_id", axis=1) sub sub.to_csv("submission.csv", index=False)
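# --- Appendix (minimal sketch, not executed or used in the models above) -------------
# The features used so far are only stock_id / time_id statistics. A natural next step is
# the realized volatility of each (stock_id, time_id) bucket computed from the order book
# via the WAP formula quoted in the introduction. The helpers below assume the
# book_train.parquet column names used earlier in this notebook (bid_price1, ask_price1,
# bid_size1, ask_size1).
def calc_wap(book_df):
    return (
        book_df["bid_price1"] * book_df["ask_size1"]
        + book_df["ask_price1"] * book_df["bid_size1"]
    ) / (book_df["bid_size1"] + book_df["ask_size1"])


def realized_volatility_from_book(book_bucket):
    # realized volatility = sqrt(sum of squared log returns of the WAP)
    log_returns = np.log(calc_wap(book_bucket)).diff().dropna()
    return np.sqrt(np.sum(log_returns**2))


# Example (one bucket of stock_id=0):
# realized_volatility_from_book(book_trainparquet[book_trainparquet["time_id"] == 5])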
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/433/69433868.ipynb
null
null
[{"Id": 69433868, "ScriptId": 18755140, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4989766, "CreationDate": "07/31/2021 01:18:54", "VersionNumber": 44.0, "Title": "Optiver; EDA XGBoost starter(\u65e5\u672c\u8a9e,Japanese)", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 943.0, "LinesInsertedFromPrevious": 25.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 918.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
null
null
null
null
# このノートブックでは、Optiver Realized Volatility PredictionについてのEDA・日本語での内容理解を試みます。 # 内容は随時追加予定です。 # 参考にさせていただきました) # https://www.kaggle.com/chumajin/optiver-realized-eda-for-starter-version # 基本的には自分のメモですが、コメント・ご指摘など大歓迎です! # ## コンペ概要(Overviewの翻訳など) # You will have hundreds of millions of rows of highly granular financial data at your fingertips, with which you'll design your model forecasting volatility over 10-minute periods. # → ビッグデータから10分間のボラティリティを予測するモデルを設計します。 # ボラティリティって? # → 価格変動のようです。urlはボラティリティの説明。 # https://www.smbcnikko.co.jp/terms/japan/ho/J0280.html # チュートリアルノートブックがあるよ # → こちら # https://www.kaggle.com/jiashenliu/introduction-to-financial-concepts-and-data # 評価方法は? # → RMEPS:二乗平均平方根パーセント誤差 # ## データ # **テストセットの最初の数行のみをダウンロードできるコード競争です。** # → 提出ファイルは三行だけですが、実際はたくさんデータがあるようです(テストデータの最初の三行だけを読み込んでいる。) # 以下はgoogle翻訳べた張り # ### book_ [train / test] .parquet # 区切られた寄木細工のファイルstock_id。市場に投入された最も競争力のある売買注文に関するオーダーブックデータを提供します。本の上位2つのレベルが共有されます。本の第1レベルは価格面でより競争力があり、第2レベルよりも実行が優先されます。 # * ```stock_id``` -株式のIDコード。すべてのストックIDがすべてのタイムバケットに存在するわけではありません。Parquetは、ロード時にこの列をカテゴリデータ型に強制します。あなたはそれをint8に変換したいかもしれません。 # * ```time_id``` -タイムバケットのIDコード。時間IDは必ずしも連続している必要はありませんが、すべての在庫で一貫しています。 # * ```seconds_in_bucket``` -バケットの開始からの秒数。常に0から始まります。 # * ```bid_price[1/2]``` -最も競争力のある購入レベルの正規化された価格。 # * ```ask_price[1/2]``` -最も競争力のある販売レベルの正規化された価格。 # * ```bid_size[1/2]``` -最も競争力のある購入レベルでの株式数。 # * ```ask_size[1/2]``` -最も競争力のある/ 2番目に競争力のある販売レベルの株式数。 # ### trade_[train/test].parquet # 区切られた寄木細工のファイルstock_id。実際に実行された取引に関するデータが含まれています。通常、市場では、実際の取引よりも受動的な売買意図の更新(本の更新)が多いため、このファイルは注文書よりもまばらであると予想される場合があります。 # * ```stock_id``` - 同上。 # * ```time_id``` - 同上。 # * ```seconds_in_bucket```- 同上。トレードデータとブックデータは同じ時間枠から取得され、トレードデータは一般にまばらであるため、このフィールドは必ずしも0から始まるとは限らないことに注意してください。 # * ```price``` -1秒間に発生する実行済みトランザクションの平均価格。価格は正規化されており、平均は各取引で取引された株式数によって加重されています。 # * ```size``` -取引された株式の総数。 # * ```order_count``` -発生している固有の取引注文の数。 # ### train.csv # * ```stock_id``` -上記と同じですが、これはcsvであるため、列はカテゴリではなく整数として読み込まれます。 # * ```time_id``` - 同上。 # * ```target``` -同じstock / time_idでの特徴データに続いて、10分間のウィンドウで計算された実現ボラティリティ。フィーチャデータとターゲットデータの間に重複はありません。詳細については、チュートリアルノートブックをご覧ください。 # ### test.csv # 他のデータファイルと送信ファイルの間のマッピングを提供します。他のテストファイルと同様に、ほとんどのデータは、送信時にノートブックでのみ利用可能であり、最初の数行のみがダウンロード可能です。 # * ```stock_id``` - 同上。 # * ```time_id``` - 同上。 # * ```row_id``` -送信行の一意の識別子。既存の時間ID /在庫IDのペアごとに1つの行があります。各時間枠には、必ずしもすべての個別の株式が含まれているわけではありません。 # ### sample_submission.csv # 正しい形式のサンプル送信ファイル。 # * ```row_id``` -test.csvと同じです。 # * ```target``` -train.csvと同じ定義。ベンチマークは、train.csvのターゲット値の中央値を使用しています。 # ## コンテスト Q&A (抜粋) # https://www.kaggle.com/c/optiver-realized-volatility-prediction/discussion/249752 # 同じstock_idは、すべての競合データセットの同じ株式を表していますか? 
# → Yes # time_idについて # → time_idはランダムにシャッフルされるため、異なるデータセット間のブリッジとして機能する以外の情報は含まれないことに注意してください;つまり、time_idは時系列データとはならないようです。 # 3行しかテストデータがないよ # → コードをコミットすると、コードはバックグラウンドで設定された実際のテストに対して実行され、リーダーボードが更新されます。(裏にデータがある) # ## 金融の基礎知識;キーワードなど # https://www.kaggle.com/jiashenliu/introduction-to-financial-concepts-and-data # などを参考にしましています # #### WAP; Weighted averaged price # ノートブックを見ていると、重要な特徴量としてよく出てきます。こんな感じで実装されがち # ``` # def calc_wap(df): # wap = (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1'])/(df['bid_size1'] + df['ask_size1']) # ``` # オーダーブックから計算されます。日本語記事(Volume Weighted Average Price)はこちら # https://oneinvest.jp/vwap/ # ``` # つまり、VWAP(その日の平均約定価格)を知ることができれば、VWAP価格で株を買えば少なくともその日に高値掴みすることはなくなり、平均的な価格で取引ができることになります。 # ``` # とのことです。 # ## データセットの確認 import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os # 基本ライブラリの読み込み import matplotlib.pyplot as plt import seaborn as sns # ざっくりとしたコンペの内容は以下の通り。 # * 各銘柄(stock_id)で、特定の時間区切り(time_id)の10分間の時系列データ(seconds_in_bucket)があり、その時間区切りでの価格変動の大小についてボラティリティ(今回のtarget)として評価している。 # * ~~ある銘柄(stock_id=0)の、とある時間区切り(time_id=4, 32, 34)のボラティリティを予測しよう。ただし、time_id=4は初めの少しだけしか時系列データがないし、time_id=32, 34にはデータがないよ~~ # → 上訂正:裏にデータを隠していて、今回作ったモデルでいろんなデータのボラティリティを測定するよ! # インプットデータの中身を確認します。 # /optiver-realized-volatility-predictionに格納されています。今回のコンペで読み込むデータは以下の6つ。 # * book_test.parquet # * book_train.parquet # * trade_test.parquet # * trade_train.parquet # * test.csv # * train.csv # trainとなるbook_test.parquet(trade_test.paruet)には'stock_id=0'のデータのみが格納されているのに対して、testとなるbook_train.parquet(trade_train.paruet)には種々のstock_idのデータが格納されています。なお、stock_idはある特定の銘柄のようです。 print("***") print("book_test.parquet") print("***") print("book_train.parquet") # stock_id=0について、bookのtestとtrainの中身を確認します。 # 基本的には同じデータが入っており、違いはtime_idのみのようです。あと、testデータはtime_id=4のデータで3行のseconds_in_bucketのみ(裏にテストデータが隠されている)。 book_testparquet = pd.read_parquet( "../input/optiver-realized-volatility-prediction/book_test.parquet/stock_id=0" ) book_testparquet book_trainparquet = pd.read_parquet( "../input/optiver-realized-volatility-prediction/book_train.parquet/stock_id=0" ) book_trainparquet # 1000行までのデータを可視化すると、それぞれのtime_id(銘柄)でseconds_in_bucket(時刻)が純増している様子が見れます。 # time_idごとにデータ数(行数)は違うようです(min73-max549)が、seconds_in_bucketは0から始まって600で終了するようです(1秒データが600で10分間)。 # barplotと統計データを見ると、time_idごとのデータ数は平均で200くらいのようです。 plt.plot(np.array(book_trainparquet["time_id"][:1000])) plt.show() plt.plot(np.array(book_trainparquet["seconds_in_bucket"][:1000])) plt.show() print(book_trainparquet["time_id"].value_counts()) print() print(book_trainparquet["time_id"].value_counts().describe()) sns.countplot(book_trainparquet["time_id"]) ax = plt.gca() ax.axes.xaxis.set_ticks([]) plt.show() # time_idが5のデータ(銘柄)について、推移を確認します。seconds_in_bucketを時刻として採用しています。 book_trainparquet[book_trainparquet["time_id"] == 5] book_data = book_trainparquet[book_trainparquet["time_id"] == 5] plt.figure(figsize=(20, 6)) for i, col in enumerate( [ "bid_price1", "ask_price1", "bid_price2", "ask_price2", ] ): plt.plot(book_data["seconds_in_bucket"], book_data[col], "x-", label=col) plt.legend() plt.show() plt.figure(figsize=(20, 6)) for i, col in enumerate( [ "bid_size1", "ask_size1", "bid_size2", "ask_size2", ] ): plt.plot(book_data["seconds_in_bucket"], book_data[col], label=col) plt.legend() plt.show() # bookに続いてtradeのデータについても確認します。 trade_testparquet = pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_test.parquet/stock_id=0" ) trade_testparquet trade_trainparquet = 
pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=0" ) trade_trainparquet # bookと同じく1000行までのデータを可視化すると、time_id,seconds_in_bucketの推移は似てはいますが、tradeのデータは急峻であることがわかります。 # time_idごとのデータ数は平均で30(min2-max120)くらいのようです。すなわち、tradeのデータは非常にスパース plt.plot(np.array(trade_trainparquet["time_id"][:1000])) plt.show() plt.plot(np.array(trade_trainparquet["seconds_in_bucket"][:1000])) plt.show() print(trade_trainparquet["time_id"].value_counts()) print() print(trade_trainparquet["time_id"].value_counts().describe()) sns.countplot(trade_trainparquet["time_id"]) ax = plt.gca() ax.axes.xaxis.set_ticks([]) plt.show() trade_trainparquet[trade_trainparquet["time_id"] == 5] # trade_dataに関してtime_idが5のデータ(銘柄)について、推移を確認します。seconds_in_bucketを時刻として採用しています。 # 合わせて、book_dataとtrade_dataを合わせて可視化します。trade_dataの'price'がbookデータのbid,askの範囲内で変動していることがわかります。 trade_data = trade_trainparquet[trade_trainparquet["time_id"] == 5] plt.figure(figsize=(20, 6)) for i, col in enumerate( [ "price", ] ): plt.plot(trade_data["seconds_in_bucket"], trade_data[col], "x-", label=col) plt.legend() plt.show() plt.figure(figsize=(20, 6)) for i, col in enumerate( [ "size", "order_count", ] ): plt.plot(trade_data["seconds_in_bucket"], trade_data[col], label=col) plt.legend() plt.show() plt.figure(figsize=(20, 6)) for i, col in enumerate( [ "bid_price1", "ask_price1", "bid_price2", "ask_price2", ] ): plt.plot(book_data["seconds_in_bucket"], book_data[col], "x-", label=col) plt.plot(trade_data["seconds_in_bucket"], trade_data["price"], "o--", lw=3, label=col) plt.legend() plt.show() # 今回の目的変数となるtarget(=ボラティリティ)についてです。 # ボラティリティは価格変動の度合いのようです。 # https://www.smbcnikko.co.jp/terms/japan/ho/J0280.html # ~~testデータを読み込みます。今回のコンペではここの各行についてtargetを計算せよとのこと(つまり、submissionのデータは三行)~~ # ~~ここで振り返ると、stock_id=0に関しては、time_id=4のデータ(bookもtradeも)しかないので、time_id=32, 34の予測には工夫が必要なようです。~~ test = pd.read_csv("../input/optiver-realized-volatility-prediction/test.csv") test book_testparquet trade_testparquet # trainに関しては、targetはstock_id(銘柄)ごとにtime_id, targetが設定されています。 train = pd.read_csv("../input/optiver-realized-volatility-prediction/train.csv") train # trainのターゲットの値を分析します。統計量、分布については以下。 print(train["target"].describe()) # sns.displot(train["target"]) plt.show() # 始めの10000行データを出力しました。trainのデータはかなり密に入っていることがわかります。 plt.plot(train[:10000]["stock_id"]) plt.show() # plt.plot(train[:10000]["time_id"]) plt.show() # plt.plot(train[:10000]["target"]) plt.show() train[train["stock_id"] == 1] # stock_id=0,1,2の三銘柄について、time_idでの推移(便宜的に)を見ますと、なんとなく関連性があり、同じタイミング(time_id)でtargetが大きくなっているようにも見受けられます。 # ~~このあたりの関係性をもって、stock_id=0のtime_id=32, 34のtargetを予測することになりそうです。ただし、time_id=32, 34の直接的なデータはないようです。近いところだと、time_id=31と62。~~ # → 提供されているデータは実際のテストデータとは違うため、この辺は気にしなくてよさそう。 for tt in [ train[train["stock_id"] == 0], train[train["stock_id"] == 1], train[train["stock_id"] == 2], ]: plt.plot(tt[:100]["time_id"], tt[:100]["target"]) plt.xlabel("time_id") plt.ylabel("target") plt.show() # ## EDA(簡易); stock_idでの分析 # stock_idごとにtargetの分布に差異があるようです。 規格化すればおよそ同じには見える、、、? 
# from sklearn.preprocessing import PowerTransformer for id in [1, 2, 10, 30, 100]: # xx = np.array(train[train["stock_id"] == id]["target"]) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) plt.xlim([0, 0.04]) sns.distplot(xx, ax=ax1) # plt.xlim([-3, 3]) scaler = PowerTransformer() sns.distplot(scaler.fit_transform(xx.reshape(-1, 1)), ax=ax2) # plt.show() # stock_idごとにtargetの統計量を算出します。 stock = ( train.groupby("stock_id")["target"] .agg(["mean", "median", "std", "count", "sum"]) .reset_index() ) stock stock.describe() for i in ["mean", "median", "std"]: sns.displot(stock[i]) plt.show() # 一応、銘柄ごとにボラティリティの大きさに違いがあるようなので、銘柄の統計量情報だけでも少しだけ予測できそう。 # → 銘柄のボラティリティの平均値('mean')が大きいほどボラティリティが大きい傾向にある(ほんの少し)。 train_info = train.copy() train_info["mean"] = train["stock_id"].map(dict(zip(stock["stock_id"], stock["mean"]))) train_info["median"] = train["stock_id"].map( dict(zip(stock["stock_id"], stock["median"])) ) train_info["std"] = train["stock_id"].map(dict(zip(stock["stock_id"], stock["std"]))) train_info # https://pythondatascience.plavox.info/seaborn/%E6%95%A3%E5%B8%83%E5%9B%B3%E3%83%BB%E5%9B%9E%E5%B8%B0%E3%83%A2%E3%83%87%E3%83%AB from sklearn.metrics import r2_score # for col in ["mean", "median", "std"]: # xx = train_info[col] yy = train_info["target"] # print(col) print("r2 score:", r2_score(xx, yy)) # plt.plot(xx, yy, "x") # coef = np.polyfit(xx, yy, 1) y_pred = coef[0] * xx + coef[1] print("fit 1d:", coef[0], "x+", coef[1]) # plt.plot(y_pred, xx, "k-") # plt.show() stock # stockごとに統計量がどういった形をしているかを改めて確認 plt.plot(np.sort(np.array(stock["mean"])), "x") plt.show() plt.plot(np.sort(np.array(stock["median"])), "x") plt.show() plt.plot(np.sort(np.array(stock["std"])), "x") plt.show() plt.figure(figsize=(5, 5)) plt.plot(stock["mean"], stock["median"], "o") plt.show() plt.figure(figsize=(5, 5)) plt.plot(stock["mean"], stock["std"], "o") plt.show() plt.figure(figsize=(5, 5)) plt.plot(stock["median"], stock["std"], "o") plt.show() trade_trainparquet # stock_idごとにvoalitityとprice,size,order_countの関係の確認 import glob ld = glob.glob("../input/optiver-realized-volatility-prediction/book_train.parquet/*") list_price = [] list_size = [] list_order_count = [] for i in stock["stock_id"]: list_price.append( np.mean( pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=" + str(i) )["price"] ) ) list_size.append( np.mean( pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=" + str(i) )["size"] ) ) list_order_count.append( np.mean( pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=" + str(i) )["order_count"] ) ) # labels = ["price", "size", "order_count"] for i, l in enumerate([list_price, list_size, list_order_count]): # plt.figure(figsize=(5, 5)) plt.plot(stock["mean"], l, "o") plt.ylabel(labels[i]) plt.xlabel("mean volatility") plt.show() pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=" + str(i) ) book_trainparquet_list.append() from sklearn.cluster import KMeans nc = 5 cust_array = stock.iloc[:, 1:4] labels = KMeans(n_clusters=nc).fit_predict(cust_array) plt.figure(figsize=(5, 5)) for i in range(nc): xx = stock["mean"][labels == i] yy = stock["std"][labels == i] plt.plot(xx, yy, "o") plt.show() # ## 簡易モデルの作成(線形回帰) # おためしで、統計量(median)から回帰 # →なお、スコアは全然ダメダメです。 xcol = "median" y = np.array(train_info["target"]) X = np.array(train_info[xcol]).reshape(-1, 1) from sklearn.linear_model import LinearRegression from sklearn.linear_model 
import Lasso from sklearn.svm import SVR from sklearn.metrics import r2_score from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import PowerTransformer scaler = StandardScaler() # scaler = PowerTransformer() scaler.fit(X) clf = LinearRegression() # clf = Lasso() # clf = SVR() clf.fit(scaler.transform(X), y) y_pred = clf.predict(scaler.transform(X)) plt.figure(figsize=(4, 4)) plt.plot(y, y_pred, "x") min = np.min(np.array(np.min(y), np.min(y_pred))) max = np.max(np.array(np.max(y), np.max(y_pred))) # plt.plot([0.9 * min, 1.1 * max], [0.9 * min, 1.1 * max], "k-") plt.xlabel("Actual") plt.ylabel("Estimated") print(r2_score(y, y_pred)) plt.show() # ## 一旦ここで提出(Score 0.98) # 提出条件の確認のため、上の雑なモデルで一旦Submissionしてみます。testデータから作成します。stock_idを読み込んで、対応するstock_idのmeanを読み取り、そこから回帰式で予測値を返す簡易プログラムになっています。 # これでも無事、提出されるようです;スコアはさんざん(0.98)ですが、、、 # なお、下手に回帰するよりも'median'をそのまま入れる方がスコアが高いよう(0.68)です。→targetを差分にした方が扱いやすい?? # !注意 インターネット接続は切ること test X_test = np.array( test["stock_id"].map(dict(zip(stock["stock_id"], stock[xcol]))) ).reshape(-1, 1) X_test # y_pred = X_test y_pred = clf.predict(scaler.transform(X_test)) y_pred sub = test.copy() sub["target"] = y_pred sub = sub.drop("stock_id", axis=1) sub = sub.drop("time_id", axis=1) sub sub.to_csv("submission.csv", index=False) # ## スコア # 今回のスコアはrmspeです。ので、準備。 # def rmspe(y_true, y_pred): return np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))) # # print(rmspe(y,y_pred)) # ## EDA(簡易); time_idでの分析 # time_idごとにボラティリティの違いがありそうですね(つまり、相場が大きく変動するタイミングは銘柄全体で変動が大きい) # → time_id(の統計量)を説明変数に入れる意味はありそう。 train["time_id"].describe() fig, ax = plt.subplots(1, 5, figsize=(20, 4)) for i, id in enumerate([5, 11, 16, 31, 62]): # # plt.xlim([0,0.05]) sns.distplot(train[train["time_id"] == id]["target"], ax=ax[i]) plt.show() time = ( train.groupby("time_id")["target"] .agg(["mean", "median", "std", "count", "sum"]) .reset_index() ) time time.describe() for i in ["mean", "median", "std"]: sns.displot(time[i]) plt.show() train_info = train.copy() train_info["mean"] = train["time_id"].map(dict(zip(time["time_id"], time["mean"]))) train_info["median"] = train["time_id"].map(dict(zip(time["time_id"], time["median"]))) train_info["std"] = train["time_id"].map(dict(zip(time["time_id"], time["std"]))) train_info from sklearn.metrics import r2_score # for col in ["mean", "median", "std"]: # xx = train_info[col] yy = train_info["target"] # print(col) print("r2 score:", r2_score(xx, yy)) # plt.plot(xx, yy, "x") # coef = np.polyfit(xx, yy, 1) y_pred = coef[0] * xx + coef[1] print("fit 1d:", coef[0], "x+", coef[1]) # plt.plot(y_pred, xx, "k-") # plt.show() # ## EDA; book/tradeデータ # 各銘柄(stock_idの推移を比較します) train_list = [ 0, 31, stock["stock_id"][stock["mean"].idxmax()], # ボラティリティの大きな銘柄 stock["stock_id"][stock["mean"].idxmin()], # ボラティリティの小さな銘柄 ] book_trainparquet_list = [] trade_trainparquet_list = [] for i in train_list: book_trainparquet_list.append( pd.read_parquet( "../input/optiver-realized-volatility-prediction/book_train.parquet/stock_id=" + str(i) ) ) trade_trainparquet_list.append( pd.read_parquet( "../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=" + str(i) ) ) train_list for t, tt in enumerate( [5, time["time_id"][time["mean"].idxmin()], time["time_id"][time["mean"].idxmax()]] ): # plt.figure(figsize=(20, 5)) # print("time_id == ", tt) # for i, trade_t in enumerate(trade_trainparquet_list): # trade_data = trade_t[trade_t["time_id"] == tt] plt.plot( trade_data["seconds_in_bucket"], trade_data["price"], "o--", label="stock_id=" + 
str(train_list[i]), ) plt.legend() plt.show() # 時系列データをヒートマップとして可視化 # 暫定で stock_id=1, time_id=5で規格化 from sklearn.preprocessing import StandardScaler i = 3 trade_data = trade_trainparquet_list[i][trade_trainparquet_list[i]["time_id"] == 5] scaler = StandardScaler() scaler.fit(trade_data.iloc[:, 2:]) for i, trade_t in enumerate(trade_trainparquet_list): # print("stock_id=" + str(train_list[i])) trade_data = trade_t[trade_t["time_id"] == 5] # td = scaler.transform(trade_data.iloc[:, 2:]) # heat = np.zeros([3, 600]) heat[:, :] = np.nan # for i, sec in enumerate(trade_data["seconds_in_bucket"]): heat[0, sec] = td[i, 0] heat[1, sec] = td[i, 1] heat[2, sec] = td[i, 2] # heat = pd.DataFrame(heat.T) heat = heat.interpolate() # plt.figure(figsize=(20, 3)) sns.heatmap(heat.T) plt.show() # ## モデル(簡易)の構築と提出(Score 0.52) # stock_id, time_idの統計量を使ってtargetの回帰を行います。 # なお、xgboostが一番成績が良かったので、こいつで提出したらPublic Scoreは0.52071でした。 train_info = train.copy() train_info["stock_id_mean"] = train["stock_id"].map( dict(zip(stock["stock_id"], stock["mean"])) ) train_info["stock_id_median"] = train["stock_id"].map( dict(zip(stock["stock_id"], stock["median"])) ) train_info["stock_id_std"] = train["stock_id"].map( dict(zip(stock["stock_id"], stock["std"])) ) train_info["time_id_mean"] = train["time_id"].map( dict(zip(time["time_id"], time["mean"])) ) train_info["time_id_median"] = train["time_id"].map( dict(zip(time["time_id"], time["median"])) ) train_info["time_id_std"] = train["time_id"].map( dict(zip(time["time_id"], time["std"])) ) train_info from sklearn.model_selection import KFold kf = KFold(n_splits=5, random_state=12345678, shuffle=True) y = np.array(train["target"]) X = np.array(train_info.iloc[:, 3:]) # LinearRegression,Lasso,Ridge,SVR,XGBRegressorを比較 # XGBRegressorの成績が良い(ハイパーパラメータチューニングはしていない)。 from sklearn.linear_model import LinearRegression from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge from sklearn.svm import SVR import xgboost as xgb from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import PowerTransformer scaler = StandardScaler() # scaler = PowerTransformer() scaler.fit(X) models = [LinearRegression(), Lasso(), Ridge(), SVR(), xgb.XGBRegressor()] scores = [] for clf in models: scores = [] print(clf) plt.figure(figsize=(4, 4)) for train_index, test_index in kf.split(X, y): # X_train = scaler.transform(X[train_index]) y_train = y[train_index] # X_test = scaler.transform(X[test_index]) y_test = y[test_index] # clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # plt.plot(y_pred, y_test, "x") # scores.append((rmspe(y_test, y_pred))) # print(np.mean(np.array(scores))) # min = np.min(np.array(np.min(y_test), np.min(y_pred))) max = np.max(np.array(np.max(y_test), np.max(y_pred))) # plt.plot([0.9 * min, 1.1 * max], [0.9 * min, 1.1 * max], "k-") # plt.show() # XGBRegressorでSubmissionを作成します。 test_info = test.copy() test_info["stock_id_mean"] = test["stock_id"].map( dict(zip(stock["stock_id"], stock["mean"])) ) test_info["stock_id_median"] = test["stock_id"].map( dict(zip(stock["stock_id"], stock["median"])) ) test_info["stock_id_std"] = test["stock_id"].map( dict(zip(stock["stock_id"], stock["std"])) ) test_info["time_id_mean"] = test["time_id"].map( dict(zip(time["time_id"], time["mean"])) ) test_info["time_id_median"] = test["time_id"].map( dict(zip(time["time_id"], time["median"])) ) test_info["time_id_std"] = test["time_id"].map(dict(zip(time["time_id"], time["std"]))) test_info clf = models[4] print(clf) X_test = np.array(test_info.iloc[:, 3:]) 
clf.fit(scaler.transform(X), y) y_pred = clf.predict(scaler.transform(X_test)) y_pred sub = test.copy() sub["target"] = y_pred sub = sub.drop("stock_id", axis=1) sub = sub.drop("time_id", axis=1) sub sub.to_csv("submission.csv", index=False) # ## モデル(ハイパーパラメータチューニング)の構築と提出 # optunaを使ってチューニングします。下敷きはこちら # https://www.kaggle.com/matsuosan/japanese-xgb-lgbm-voting-optuna-boston import optuna y = np.array(train["target"]) X = np.array(train_info.iloc[:, 3:]) train_info test_info # xgboostの最適化を選択します。 from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import PowerTransformer scaler = StandardScaler() # scaler = PowerTransformer() scaler.fit(X) # ``` # <bound method XGBModel.get_params of XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1, # colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1, # importance_type='gain', interaction_constraints='', # learning_rate=0.300000012, max_delta_step=0, max_depth=6, # min_child_weight=1, missing=nan, monotone_constraints='()', # n_estimators=100, n_jobs=4, num_parallel_tree=1, random_state=0, # reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1, # tree_method='exact', validate_parameters=1, verbosity=None)> # ``` import xgboost as xgb def objective_xgb(trial): # if trial.number == 0: learning_rate = trial.suggest_loguniform("learning_rate", 0.3, 0.3) gamma = trial.suggest_loguniform("gamma", 1e-8, 1e-8) max_depth = trial.suggest_int("max_depth", 6, 6) min_child_weight = trial.suggest_loguniform("min_child_weight", 1.0, 1.0) # max_delta_step = trial.suggest_uniform('max_delta_step', 1e-10, 1e-10) subsample = trial.suggest_uniform("subsample", 1.0, 1.0) reg_lambda = trial.suggest_uniform("reg_lambda", 1.0, 1.0) reg_alpha = trial.suggest_uniform("reg_alpha", 0.0, 0.0) else: learning_rate = trial.suggest_loguniform("learning_rate", 1e-8, 1.0) gamma = trial.suggest_loguniform("gamma", 1e-15, 1e-5) max_depth = trial.suggest_int("max_depth", 1, 20) min_child_weight = trial.suggest_loguniform("min_child_weight", 1e-8, 1e3) # max_delta_step = trial.suggest_uniform('max_delta_step', 0, 1.0) subsample = trial.suggest_uniform("subsample", 0.0, 1.0) reg_lambda = trial.suggest_uniform("reg_lambda", 0.0, 1000.0) reg_alpha = trial.suggest_uniform("reg_alpha", 0.0, 1000.0) # reg_alpha = trial.suggest_loguniform('reg_alpha', 1e-15, 1e4) # clf = xgb.XGBRegressor( learning_rate=learning_rate, subsample=subsample, max_depth=max_depth, min_child_weight=min_child_weight, max_delta_step=0, # 1e-10で発散したため、0で固定 reg_lambda=reg_lambda, gamma=gamma, reg_alpha=reg_alpha, # objective='reg:squarederror' ) scores = [] for train_index, test_index in kf.split(X, y): # X_train = scaler.transform(X[train_index]) y_train = y[train_index] # X_test = scaler.transform(X[test_index]) y_test = y[test_index] # clf.fit(X_train, y_train) # y_pred = clf.predict(X_test) # scores.append((rmspe(y_test, y_pred))) # return np.mean(np.array(scores)) # optunaの試行数はtimeoutで時間で設定 optuna.logging.disable_default_handler() # Optunaの出力を抑制する # optuna.logging.enable_default_handler() # Optunaで出力する # n_trials = 5 # # optuna study = optuna.create_study() # study.optimize(objective_xgb, n_trials=n_trials) # study.optimize(objective_xgb, timeout=60*2) # study.optimize(objective_xgb, timeout=3600*5) print("best_params") print(study.best_params) print("best_value") print(study.best_value) print("best_trial") print(study.best_trial) study_score = np.array([x.value for x in study.trials[:]]) plt.figure(figsize=(30, 6)) plt.plot(study_score, "o") plt.yscale("log") 
plt.plot([0, len(study_score)], [study_score[0], study_score[0]], "k-") plt.show() plt.figure(figsize=(30, 6)) plt.plot(np.sort(study_score), "x-") plt.yscale("log") plt.plot([0, len(study_score)], [study_score[0], study_score[0]], "k-") plt.show() df_study = study.trials_dataframe() df_study.to_csv("optuna.csv") df_study sort = np.argsort(np.array(df_study["value"])) df_study.sort_values("value") for col in df_study.columns[5:-1]: print(col) plt.figure(figsize=(30, 6)) plt.plot(df_study[col], "o") plt.show() plt.figure(figsize=(30, 6)) plt.plot(np.array(df_study.sort_values("value")[col]), "o") plt.show() import pickle clf = xgb.XGBRegressor( **study.best_params, # objective='reg:squarederror' ) with open("model.pickle", mode="wb") as fp: pickle.dump(clf, fp) print(clf) clf.fit(scaler.transform(X), y) y_pred = clf.predict(scaler.transform(X)) plt.figure(figsize=(4, 4)) plt.plot(y_pred, y, "x") # print(np.mean(np.array((rmspe(y, y_pred))))) # min = np.min(np.array(np.min(y_test), np.min(y_pred))) max = np.max(np.array(np.max(y_test), np.max(y_pred))) # plt.plot([0.9 * min, 1.1 * max], [0.9 * min, 1.1 * max], "k-") # plt.show() plt.barh(train_info.columns[3:][::-1], clf.feature_importances_[::-1]) X_test = np.array(test_info.iloc[:, 3:]) y_pred = clf.predict(scaler.transform(X_test)) y_pred sub = test.copy() sub["target"] = y_pred sub = sub.drop("stock_id", axis=1) sub = sub.drop("time_id", axis=1) sub sub.to_csv("submission.csv", index=False)
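# For reference, a minimal sketch of the RMSPE metric that the cross-validation loops
# above report via rmspe() (defined earlier in this notebook); this standalone helper
# only illustrates the usual root-mean-squared-percentage-error formula and is not
# wired into the pipeline.
import numpy as np


def rmspe_sketch(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    # relative error per sample, squared, averaged, then square-rooted; y_true must be non-zero
    return np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))


# Example: predictions that are 10% off everywhere give an RMSPE of 0.1
print(rmspe_sketch([0.01, 0.02], [0.011, 0.022]))  # -> 0.1 (up to floating-point rounding)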
false
0
11,592
3
11,592
11,592
69433739
# # House price regression
# ## Description of the notebook
# This notebook is organized in 7 parts.
# 1. Data acquisition: we will import the dataset and get a first glance at what it contains.
# 2. Dataset exploratory analysis: we will analyze the data, get an understanding of the main features, of their types, and whether they have missing values...
# 3. Features exploratory analysis: we will analyze the data, find the correlation between the features and the sale price and decide which features to keep
# 4. Data cleaning and feature selection: select the features that will be kept in the model and remove the others, deal with missing values if there are any, create dummy variables for categories...
# 5. Model preparation: prepare the train and test sets and prepare the models we will use for the regression
# 6. Pipeline evaluation and selection: we will run the models and get their scores, which will allow us to choose the best model.
# 7. Predict: final stage, we will run our final model to execute predictions.
# # 1. Data acquisition
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from lightgbm import LGBMRegressor
from xgboost.sklearn import XGBRegressor
from catboost import CatBoostRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, Ridge
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder  # used by apply_label_encoder further below
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import FunctionTransformer
from sklearn.decomposition import PCA
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import VotingRegressor
from scipy.stats import skew

pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.set_option("display.width", 1000)
sns.set_style("darkgrid")
import warnings

warnings.filterwarnings("ignore")
dataset_train = pd.read_csv(
    "/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
dataset_test = pd.read_csv(
    "/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
# # 2. 
Dataset Exploratory analysis # In this section we will check the number and types of features for the dataset, if there is missing values, if there is corelation between some features, if we can remove some unnecessary features and if we can create new features from the ones we already have def screen_data(df): rows = [] for col in df.columns: rows.append([col, df[col].isnull().sum(), df[col].nunique(), df[col].dtypes]) return pd.DataFrame( rows, columns=["Col", "Missing values", "Unique values", "Type"] ) def screen_missing_values(df, ratio=0.1, verbose=0): total_rows = len(df) total_columns = len(df.columns) df = screen_data(df) df_m = df.loc[df["Missing values"] != 0] # .sort_values(['Type', 'Missing values']) total_missing_values = len(df_m) df_m.loc[:, "Percentage missing values"] = df_m["Missing values"] / total_rows missing_values_less10 = len(df_m.loc[df_m["Percentage missing values"] <= ratio]) missing_values_more10 = len(df_m.loc[df_m["Percentage missing values"] > ratio]) print( "Total missing values : ", total_missing_values, " --> ", round(100 * total_missing_values / total_columns, 0), "% of all features", ) print( "Columns with less than ", round(ratio * 100, 0), "% missing values : ", missing_values_less10, " --> ", round(100 * missing_values_less10 / total_missing_values, 0), "% of features with missing values", ) print( "Columns with more than ", round(ratio * 100, 0), "% missing values : ", missing_values_more10, " --> ", round(100 * missing_values_more10 / total_missing_values, 0), "% of features with missing values", ) if verbose >= 1: print("") print( df_m.drop(columns=["Unique values", "Type"]).loc[ df_m["Percentage missing values"] > ratio ] ) # ## 2.1 Dataframe information dataset_train.shape, dataset_test.shape # The train and the test set are the same size, so it's going to be important not to overfit the model. dataset_train.head(10) df = screen_data(dataset_train) df.T # ### Missing values screen_missing_values(dataset_train, verbose=1) screen_missing_values(dataset_test, verbose=1) # In the dataset test there is more columns with missing values, but most of them have less than 10% missing values. # The column with more than 10% missing values are the same in train and test. # The columns might have missing values to indicate an absence of pool or fireplace for example, we will have to analyse things a bit more to conclude. # ### Types of data print( "Categorical features : ", len( dataset_train.drop(columns=["Id", "SalePrice"]) .select_dtypes(include="object") .columns ), ) print( "Numerical features : ", len( dataset_train.drop(columns=["Id", "SalePrice"]) .select_dtypes(exclude="object") .columns ), ) print("Total features : ", len(dataset_train.drop(columns=["Id", "SalePrice"]).columns)) # A lot of numerical and categorical data # ## 2.2 Data description # Based on this very small analysis we can see that we have 79 different features. # In order to start analysing we will have to choose some feature we think might give us a good aproximation of the sale price. # It is not possible to carry an extensive EDA on all the features. # Let's if we can drop some feature which give us the same info (abs(corr) > 0.85), or that represent the same idea. # ## Let's check the document describing the data # ### Columns we will analyse # - MSZoning because there is probably a difference between an agriculture zone, an industrial zone and a residential zone. 
# - Lot area, which is the size of the lot # - Neighborhood, probably the price is not the same depending on the neighborhood. # - OverallQual & OverallCond # - YearBuilt & YearRemodAdd # - MoSold, YrSold: maybe the time of year has an impact on the price # - SaleType # - Bedroom & TotRmsAbvGrd # - TotalSF = TotalBsmtSF + 1stFlrSF + 2ndFlrSF # ### Columns we will transform # - OverallQual & OverallCond, we can probably aggregate thse columns together # - YearBuilt & YearRemodAdd # - Check if all these columns ExterQual, ExterCond, BsmtCond, BsmtFinType1, BsmtFinType2, HeatingQC, KitchenQual, GarageQual and GarageCond give us a good aproximation of overall quality and overall condition or if there was some errors during the calculation # - TotalSF = TotalBsmtSF + 1stFlrSF + 2ndFlrSF # - TotalBath = FullBath + HalfBath + BsmtFullBath + BsmtHalfBath # - TotalPorch = WoodDeckSF + openPorchSF + EnclosedPorch + 3SsnPorch + ScreenPorch # ### Columns we will probably drop # - LandSlope or LandContour, because they contain the same information # - The columns with a lot of missing data, unless they provide very valuable data and we can consider that missing data is just an abscence of pool or fireplace. # # 3. Feature exploratory analysis # Let's start with two categorical data that describe the surroundings of the house MSZoning and Neighborhood. fig, ax = plt.subplots(1, 2, figsize=(22, 5)) sns.boxplot(x="MSZoning", y="SalePrice", data=dataset_train, ax=ax[0]) ax[0].set_title("Price vs MSZoning") sns.countplot(x="MSZoning", data=dataset_train, ax=ax[1]) ax[1].set_title("Count per MSZoning") order = ( dataset_train.groupby("Neighborhood") .agg(np.mean) .sort_values("SalePrice") .index.to_list() ) fig, ax = plt.subplots(2, 1, figsize=(22, 10)) sns.boxplot(x="Neighborhood", y="SalePrice", data=dataset_train, order=order, ax=ax[0]) ax[0].set_title("Price vs Neighborhood") sns.countplot(x="Neighborhood", data=dataset_train, order=order, ax=ax[1]) ax[1].set_title("Price vs Neighborhood") order = ( dataset_train.groupby("Neighborhood") .agg(np.mean) .sort_values("SalePrice") .index.to_list() ) fig, ax = plt.subplots(1, 1, figsize=(22, 7)) sns.boxplot( x="Neighborhood", y="SalePrice", hue="MSZoning", data=dataset_train, order=order, ax=ax, ) ax.set_title("Price vs Neighborhood") # #### Things we can learn from the previous analysis # - The Neighborhood seems to have a big impact on the price # - Inside a neighborhood, the price can change depending on the zone for example in Crawfor. # ### Let's continue witht he size of the house # We will check the lot size and the size of the house. 
dataset_train["TotalSF"] = ( dataset_train["1stFlrSF"] + dataset_train["2ndFlrSF"] + dataset_train["TotalBsmtSF"] ) fig, ax = plt.subplots(2, 3, figsize=(22, 10)) sns.scatterplot(x="LotArea", y="SalePrice", data=dataset_train, ax=ax[0][0]) ax[0][0].set_title("SalePrice vs LotArea") sns.scatterplot(x="TotalSF", y="SalePrice", data=dataset_train, ax=ax[0][1]) ax[0][1].set_title("SalePrice vs TotalSF") sns.scatterplot( x="GrLivArea", y="SalePrice", data=dataset_train, ax=ax[0][2] ) # LowQualFinSF or BsmtUnfSF ax[0][2].set_title("SalePrice vs GrLivArea") sns.scatterplot(x="1stFlrSF", y="SalePrice", data=dataset_train, ax=ax[1][0]) ax[1][0].set_title("SalePrice vs 1stFlrSF") sns.scatterplot(x="2ndFlrSF", y="SalePrice", data=dataset_train, ax=ax[1][1]) ax[1][1].set_title("SalePrice vs 2ndFlrSF") sns.scatterplot(x="TotalBsmtSF", y="SalePrice", data=dataset_train, ax=ax[1][2]) ax[1][2].set_title("SalePrice vs TotalBsmtSF") # #### Things we can learn from the previous analysis # - The lot area is not much correlated to the price, but maybe we can transform this column to get a better correlation (log?) # - The 1stFlrSF, 2ndFlrSF and TotalBsmtSF are correlated to the price but show an increase of the variance when the price increases. # - Summing all these values together give us a better correlation, with some outliers that we might have to remove or transform # ### Let's check if the overall quality and condition of the house have an impact on the price order_qual = ( dataset_train.groupby("OverallQual") .agg(np.mean) .sort_values("SalePrice") .index.to_list() ) order_cond = ( dataset_train.groupby("OverallCond") .agg(np.mean) .sort_values("SalePrice") .index.to_list() ) fig, ax = plt.subplots(2, 1, figsize=(22, 10)) sns.boxplot( x="OverallQual", y="SalePrice", data=dataset_train, order=order_qual, ax=ax[0] ) ax[0].set_title("Price vs OverallQual") sns.boxplot( x="OverallCond", y="SalePrice", data=dataset_train, order=order_qual, ax=ax[1] ) ax[1].set_title("Price vs OverallCond") # #### Things we can learn from the previous analysis # - The overall quality seems a good indicator of the price. # - The overall condition seems less good an indicator of the price, maybe with some binning we could get a better price indicator. # ### Let's see what we can learn from the construction year and remodeling year # order = dataset_train.groupby('Neighborhood').agg(np.mean).sort_values('SalePrice').index.to_list() fig, ax = plt.subplots(1, 1, figsize=(22, 7)) sns.boxplot(x="YearBuilt", y="SalePrice", data=dataset_train, ax=ax) ax.set_title("Price vs YearBuilt") dataset_train["DecadeBuilt"] = dataset_train["YearBuilt"] / 10 dataset_train["DecadeBuilt"] = dataset_train["DecadeBuilt"].astype(int) * 10 # dataset_train[['YearBuilt', 'DecadeBuilt']] fig, ax = plt.subplots(1, 1, figsize=(22, 7)) sns.boxplot(x="DecadeBuilt", y="SalePrice", data=dataset_train, ax=ax) ax.set_title("Price vs YearBuilt") # #### Things we can learn from the previous analysis # - The price of recent houses is a bit higher than houses between 1900 and 1970. # - House in the 1880's and the 1890's are also a bit more expensive. 
fig, ax = plt.subplots(1, 1, figsize=(22, 7)) sns.boxplot(x="YearRemodAdd", y="SalePrice", data=dataset_train, ax=ax) ax.set_title("Price vs YearRemodAdd") dataset_train["DecadeRemodAdd"] = dataset_train["YearRemodAdd"] / 10 dataset_train["DecadeRemodAdd"] = dataset_train["DecadeRemodAdd"].astype(int) * 10 # dataset_train[['YearBuilt', 'DecadeBuilt']] fig, ax = plt.subplots(1, 1, figsize=(22, 7)) sns.boxplot(x="DecadeRemodAdd", y="SalePrice", data=dataset_train, ax=ax) ax.set_title("Price vs DecadeRemodAdd") # #### Things we can learn from the previous analysis # - The price of houses that were remodeled very recently (2010) is also a bit higher than the rest. # - There seem to be a small increase of the price the more recent the remodelation was done # ### Let's see what we can learn from the number of room fig, ax = plt.subplots(1, 2, figsize=(22, 7)) sns.boxplot(x="BedroomAbvGr", y="SalePrice", data=dataset_train, ax=ax[0]) ax[0].set_title("Price vs number of bedrooms") sns.boxplot(x="TotRmsAbvGrd", y="SalePrice", data=dataset_train, ax=ax[1]) ax[1].set_title("Price vs number of rooms") # #### What can we learn from the previous analysis # - The number of rooms is correlated to the price, which was to be expected # - The number of bedroom presents something interesting. # - A house with 0 rooms above ground has a higher average price than houses with 1, 2 or 3 bedrooms above ground # - We need to dig a bit more into the houses with 0 bedroom. # - 1st idea, the number of rooms was badly indicated and we need to estimate it based on other features # - 2nd idea, the rooms are actually in the basement. # ### Let's dig a bit more in the houses with 0 bedroom above ground df_0bd = dataset_train.loc[dataset_train["BedroomAbvGr"] == 0] df_0bd[ [ "YearBuilt", "BldgType", "BsmtFinType1", "BsmtFinType2", "TotalBsmtSF", "GrLivArea", "TotRmsAbvGrd", "BsmtFullBath", "BsmtHalfBath", "FullBath", "HalfBath", "KitchenAbvGr", "SalePrice", ] ].head(20) df_0bd = dataset_test.loc[dataset_train["BedroomAbvGr"] == 0] df_0bd[ [ "YearBuilt", "BldgType", "BsmtFinType1", "BsmtFinType2", "TotalBsmtSF", "GrLivArea", "TotRmsAbvGrd", "BsmtFullBath", "BsmtHalfBath", "FullBath", "HalfBath", "KitchenAbvGr", ] ].head(20) # Actually there is only a few houses without bedrooms above ground, but some have a very high number of rooms. # There is probably a way of estimating the number of bedrooms # ### Let's check if the conditions of the sales and the time of the sale have an impact on the price fig, ax = plt.subplots(2, 3, figsize=(22, 10)) sns.boxplot(x="SaleType", y="SalePrice", data=dataset_train, ax=ax[0][0]) ax[0][0].set_title("Price vs SaleType") sns.boxplot(x="MoSold", y="SalePrice", data=dataset_train, ax=ax[0][1]) ax[0][1].set_title("Price vs month of sale") sns.boxplot(x="YrSold", y="SalePrice", data=dataset_train, ax=ax[0][2]) ax[0][2].set_title("Price vs year of sale") sns.countplot(x="SaleType", data=dataset_train, ax=ax[1][0]) ax[1][0].set_title("Price vs SaleType") sns.countplot(x="MoSold", data=dataset_train, ax=ax[1][1]) ax[1][1].set_title("Price vs month of sale") sns.countplot(x="YrSold", data=dataset_train, ax=ax[1][2]) ax[1][2].set_title("Price vs year of sale") # The year and month of sale does not seem to have an impact on the price. # The sale type seems to have an impact but outside of new houses I don't think it is usefull to consider other types of sale because of the population size which is very small. # # 4. 
Data cleaning and feature selection # Let's start by creating some new columns as we stated in the column analysis. # ## 4.1 Feature creation # ### Columns we will transform # - OverallQual & OverallCond, we can probably aggregate these columns together # - YearBuilt & YearRemodAdd # - Check if all these columns ExterQual, ExterCond, BsmtCond, BsmtFinType1, BsmtFinType2, HeatingQC, KitchenQual, GarageQual and GarageCond give us a good aproximation of overall quality and overall condition or if there was some errors during the calculation # - TotalSF = TotalBsmtSF + 1stFlrSF + 2ndFlrSF # - TotalBath = FullBath + HalfBath + BsmtFullBath + BsmtHalfBath # - TotalPorch = WoodDeckSF + openPorchSF + EnclosedPorch + 3SsnPorch + ScreenPorch # #### Let's start with a study of the quality and condition of the house dataset_train[ [ "OverallQual", "ExterQual", "BsmtQual", "HeatingQC", "FireplaceQu", "GarageQual", "PoolQC", ] ] dataset_train[["OverallCond", "ExterCond", "BsmtCond", "HeatingQC", "GarageCond"]] # There is differences of OverallCond between houses with the same score for ExterCond, BsmtCond, HeatingQC, GarageCond. # We could recalculate a new score for the overall condition. for col in ["ExterCond", "BsmtCond", "HeatingQC", "GarageCond"]: dataset_train[col] = dataset_train[col].map( {"Ex": 9, "Gd": 7, "TA": 5, "Fa": 3, "NA": np.nan}, na_action="ignore" ) dataset_train["OverallCond_calculated"] = dataset_train[ ["ExterCond", "BsmtCond", "HeatingQC", "GarageCond"] ].mean(axis=1) dataset_train["OverallCond"] = dataset_train["OverallCond"].astype(float) df = dataset_train.loc[ abs(dataset_train["OverallCond"] - dataset_train["OverallCond_calculated"]) > 1 ] dataset_train["OverallCond_calculated"] = dataset_train[ "OverallCond_calculated" ].astype(int) dataset_train["OverallCond_calculated_2"] = ( dataset_train[["OverallCond_calculated", "OverallCond"]].mean(axis=1).astype(int) ) fig, ax = plt.subplots(3, 1, figsize=(22, 15)) sns.boxplot(x="OverallCond", y="SalePrice", data=dataset_train, ax=ax[0]) ax[0].set_title("Price vs OverallQual") sns.boxplot(x="OverallCond_calculated", y="SalePrice", data=dataset_train, ax=ax[1]) ax[1].set_title("Price vs OverallCond calculated") sns.boxplot(x="OverallCond_calculated_2", y="SalePrice", data=dataset_train, ax=ax[2]) ax[2].set_title("Price vs OverallCond calculated 2") # It seems that by averaging the overall condition score and our calculated condition score and binning the result into two categories: # good condition (>=5) and average condition (<5) we could get a better feature. # #### Let's now check if the calculus of square feet is proprerly made. # TotalSF = TotalBsmtSF + 1stFlrSF + 2ndFlrSF + BsmtUnfSF + LowQualFinSF dataset_train[ [ "TotalSF", "GrLivArea", "TotalBsmtSF", "1stFlrSF", "2ndFlrSF", "BsmtUnfSF", "LowQualFinSF", ] ] # We can see that it doesn't include the basement SF which in my opinion is a mistake, for example on row number 1458, the house has half its surface in the basement. # There must be some value in the surface. 
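# A direct check of the square-footage bookkeeping discussed above (a minimal sketch,
# assuming the usual Ames convention that GrLivArea only covers above-ground surface):
# count the rows where GrLivArea differs from 1stFlrSF + 2ndFlrSF + LowQualFinSF.
above_ground = (
    dataset_train["1stFlrSF"] + dataset_train["2ndFlrSF"] + dataset_train["LowQualFinSF"]
)
print(
    "rows where GrLivArea != 1stFlrSF + 2ndFlrSF + LowQualFinSF:",
    int((dataset_train["GrLivArea"] != above_ground).sum()),
)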
# #### Let's see if we can estimathe number of bedrooms len(dataset_train.loc[dataset_train["BedroomAbvGr"] == 0]), len( dataset_test.loc[dataset_test["BedroomAbvGr"] == 0] ) dataset_train.loc[dataset_train["BedroomAbvGr"] == 0] # df = dataset_train.loc[abs(dataset_train['TotRmsAbvGrd'] - dataset_train['KitchenAbvGr'] - dataset_train['BedroomAbvGr']) >= 4] df = dataset_train.copy() df["bedrooms_ratio"] = dataset_train["BedroomAbvGr"] / dataset_train["TotRmsAbvGrd"] df[["TotRmsAbvGrd", "KitchenAbvGr", "BedroomAbvGr", "bedrooms_ratio"]].describe() df.loc[ (df["bedrooms_ratio"] <= 0.2) | (df["bedrooms_ratio"] >= 0.8), ["TotRmsAbvGrd", "KitchenAbvGr", "BedroomAbvGr", "bedrooms_ratio"], ].head(20) fig, ax = plt.subplots(1, 2, figsize=(22, 7)) sns.histplot(x="bedrooms_ratio", data=df, stat="probability", ax=ax[0]) ax[0].set_title("Count of bedrooms_ratio") sns.scatterplot(x="bedrooms_ratio", y="TotRmsAbvGrd", data=df, ax=ax[1]) ax[1].set_title("TotRmsAbvGrd vs bedrooms ratio") # Let's try to increase the have a bedroom ratio between 0.5 and 0.8 or 0.6 for big houses. # We will increase or decrease the number of bedrooms until we reach a satisfactory ratio. mask = ( ((df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4)) | ((df["bedrooms_ratio"] <= 0.33) & (df["TotRmsAbvGrd"] >= 5)) ) | ( ((df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 6)) | ((df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 5)) ) df.loc[mask, ["TotRmsAbvGrd", "KitchenAbvGr", "BedroomAbvGr", "bedrooms_ratio"]].head( 20 ) # Let's first put a kitchen in every house df.loc[df["KitchenAbvGr"] == 0, "KitchenAbvGr"] = 1 mask = ( ((df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4)) | ((df["bedrooms_ratio"] <= 0.33) & (df["TotRmsAbvGrd"] >= 5)) ) | ( ((df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5)) | ((df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4)) ) while len(df.loc[mask]) != 0: # small houses, this means that small house have more bedrooms than other room df.loc[ (df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4), "BedroomAbvGr" ] += 1 df.loc[ (df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4), "BedroomAbvGr" ] -= 1 # big houses df.loc[ (df["bedrooms_ratio"] < 0.33) & (df["TotRmsAbvGrd"] >= 5), "BedroomAbvGr" ] += 1 df.loc[ (df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5), "BedroomAbvGr" ] -= 1 # update value of bedroom ratio df.loc[:, "bedrooms_ratio"] = df["BedroomAbvGr"] / df["TotRmsAbvGrd"] mask = ( ((df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4)) | ((df["bedrooms_ratio"] <= 0.33) & (df["TotRmsAbvGrd"] >= 5)) ) | ( ((df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5)) | ((df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4)) ) df[["TotRmsAbvGrd", "KitchenAbvGr", "BedroomAbvGr", "bedrooms_ratio"]].head(30) df[["TotRmsAbvGrd", "KitchenAbvGr", "BedroomAbvGr", "bedrooms_ratio"]].describe() fig, ax = plt.subplots(2, 2, figsize=(22, 15)) sns.boxplot(x="BedroomAbvGr", y="SalePrice", data=df, ax=ax[0][0]) ax[0][0].set_title("Price vs number of bedrooms after normalization") sns.boxplot(x="BedroomAbvGr", y="SalePrice", data=dataset_train, ax=ax[0][1]) ax[0][1].set_title("Price vs number of bedrooms before normalization") sns.countplot(x="BedroomAbvGr", data=df, ax=ax[1][0]) ax[1][0].set_title("Count of number of bedrooms after normalization") sns.countplot(x="BedroomAbvGr", data=dataset_train, ax=ax[1][1]) ax[1][1].set_title("Count of number of bedrooms before normalization") # We almost didn't change the distribution of bedrooms in the houses but now we 
have a more logicalrelation between number of bedrooms and SalePrice. # #### Let's now see if we can normalize the number of rooms and bedrooms by the size of the house. dataset_train["SF_per_BedroomAbvGr"] = ( dataset_train["TotalSF"] / dataset_train["BedroomAbvGr"] ) dataset_train["SF_per_TotRmsAbvGrd_per_sf"] = ( dataset_train["TotalSF"] / dataset_train["TotRmsAbvGrd"] ) fig, ax = plt.subplots(1, 2, figsize=(22, 7)) sns.scatterplot(x="SF_per_BedroomAbvGr", y="SalePrice", data=dataset_train, ax=ax[0]) ax[0].set_title("Price vs SF_per_BedroomAbvGr") sns.scatterplot( x="SF_per_TotRmsAbvGrd_per_sf", y="SalePrice", data=dataset_train, ax=ax[1] ) ax[1].set_title("Price vs SF_per_TotRmsAbvGrd_per_sf") # #### Let's check the total number of bath dataset_train["Total_bath"] = dataset_train[ ["FullBath", "HalfBath", "BsmtFullBath", "BsmtHalfBath"] ].sum(axis=1) fig, ax = plt.subplots(1, 2, figsize=(22, 7)) sns.boxplot(x="Total_bath", y="SalePrice", data=dataset_train, ax=ax[0]) ax[0].set_title("Price vs total number of bath") sns.countplot(x="Total_bath", data=dataset_train, ax=ax[1]) ax[1].set_title("Price vs SF_per_TotRmsAbvGrd_per_sf") # We could categorize the total number of baths with 1, 2, 3 and 4+ number of bath # #### Let's see if the total porch area is a good estimator of the price of the house. dataset_train["Total_porch_SF"] = dataset_train[ ["WoodDeckSF", "OpenPorchSF", "EnclosedPorch", "3SsnPorch", "ScreenPorch"] ].sum(axis=1) fig, ax = plt.subplots(1, 2, figsize=(22, 7)) sns.scatterplot(x="Total_porch_SF", y="SalePrice", data=dataset_train, ax=ax[0]) ax[0].set_title("Price vs Total_porch_SF") # The total size of the porch does not seem to be a good estimator of the SalePrice # #### Let's look at the garage size fig, ax = plt.subplots(1, 3, figsize=(22, 7)) sns.boxplot(x="GarageCars", y="SalePrice", data=dataset_train, ax=ax[0]) ax[0].set_title("Price vs GarageCars") sns.scatterplot(x="GarageArea", y="SalePrice", data=dataset_train, ax=ax[1]) ax[1].set_title("Price vs GarageArea") sns.countplot(x="GarageCars", data=dataset_train, ax=ax[2]) ax[2].set_title("Count of GarageCars") # We could do like with Total_bath, 0, 1, 2 or 3+ categories for GarageCars # #### Let's investigate a bit more the feature that don't appear often in the house like pool, fireplace and Misc and their value fig, ax = plt.subplots(2, 3, figsize=(22, 12)) sns.boxplot(x="MiscFeature", y="SalePrice", data=dataset_train, ax=ax[0][0]) ax[0][0].set_title("Price vs MiscFeature") sns.scatterplot(x="PoolArea", y="SalePrice", data=dataset_train, ax=ax[0][1]) ax[0][1].set_title("Price vs PoolArea") sns.scatterplot(x="MiscVal", y="SalePrice", data=dataset_train, ax=ax[0][2]) ax[0][2].set_title("SalePrice of MiscVal") sns.boxplot(x="Fireplaces", y="SalePrice", data=dataset_train, ax=ax[1][0]) ax[1][0].set_title("Price vs Fireplaces") sns.boxplot(x="FireplaceQu", y="SalePrice", data=dataset_train, ax=ax[1][1]) ax[1][1].set_title("Price vs FireplaceQu") sns.countplot(x="Fireplaces", data=dataset_train, ax=ax[1][2]) ax[1][2].set_title("Count of Fireplaces") # It does not seem interesting to take into account these Misc features. # Maybe we could integrate the number of fireplaces. 
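# A minimal sketch of the "Fireplaces 0, 1, 2+" grouping listed in the preprocessing
# plan further below, done here with a simple cap (Fireplaces_capped is a throwaway
# illustration column, not used later; the pipeline sketches the same idea with pd.cut).
dataset_train["Fireplaces_capped"] = dataset_train["Fireplaces"].clip(upper=2)
print(dataset_train["Fireplaces_capped"].value_counts().sort_index())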
# ## 4.2 Checking for missing data # For now the features we are considering are : # - MSZoning, Neighborhood # - OverallQual & OverallCond, ExterCond, BsmtCond, HeatingQC, GarageCond # - YearBuilt & YearRemodAdd # - SaleType # - Bedroom & TotRmsAbvGrd, FullBath + HalfBath + BsmtFullBath + BsmtHalfBath, Kitchen # - TotalSF # - GarageCars # dataset_train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) dataset_test = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/test.csv" ) train = dataset_train[ [ "MSZoning", "Neighborhood", "OverallQual", "OverallCond", "ExterCond", "BsmtCond", "HeatingQC", "GarageCond", "YearBuilt", "YearRemodAdd", "SaleType", "BedroomAbvGr", "TotRmsAbvGrd", "FullBath", "HalfBath", "BsmtFullBath", "BsmtHalfBath", "KitchenAbvGr", "GrLivArea", "GarageCars", ] ] train.isna().sum() train.describe() test = dataset_test[ [ "MSZoning", "Neighborhood", "OverallQual", "OverallCond", "ExterCond", "BsmtCond", "HeatingQC", "GarageCond", "YearBuilt", "YearRemodAdd", "SaleType", "BedroomAbvGr", "TotRmsAbvGrd", "FullBath", "HalfBath", "BsmtFullBath", "BsmtHalfBath", "KitchenAbvGr", "GrLivArea", "GarageCars", ] ] test.isna().sum() # ## 4.3 Checking for similar distribution in the train and test datasets def hist_train_test(train, test, col_type="numerical"): train["type"] = "train" test["type"] = "test" total = pd.concat([test, train]) if col_type == "numerical": cols = total.select_dtypes(include=np.number).columns.to_list() if col_type == "categorical": cols = total.select_dtypes(include=["object", "category"]).columns.to_list() nb_rows = int(round(0.9 + (len(cols) / 3), 0)) i = j = 0 fig, ax = plt.subplots(nb_rows, 3, figsize=(22, nb_rows * 6)) for col in cols: if col_type == "numerical": ax[i][j] = sns.histplot( x=col, data=total, hue="type", multiple="dodge", bins=20, ax=ax[i][j] ) if col_type == "categorical": ax[i][j] = sns.countplot(x=col, data=total, hue="type", ax=ax[i][j]) if j == 2: i = i + 1 j = (j + 1) % 3 hist_train_test(train, test, "numerical") hist_train_test(train, test, "categorical") # The distribution of the data between train set and test set is very close. # We don't need to modify our data. 
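# A numeric companion to the train/test histograms above (a minimal sketch): a
# two-sample Kolmogorov-Smirnov statistic per numerical column gives a rough,
# quantitative measure of how close the two distributions are (smaller = closer).
from scipy.stats import ks_2samp

for col in train.select_dtypes(include=np.number).columns:
    res = ks_2samp(train[col].dropna(), test[col].dropna())
    print(f"{col:<15s} KS statistic = {res.statistic:.3f}")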
# ## 4.4 Preprocessing pipeline # Steps to include in the preprocessing pipeline: # - Feature engineering # - Calculate TotalSF # - Create a square feet per room and square feet per bedroom feature # - Create a BuiltDecade and RemodallDecade # - Recalculate the number of bedrooms # - MSZoning, put the three smallest categories together in other # - SaleType, put the smallest categories together in other # - Total_bath = FullBath + HalfBath + BsmtFullBath + BsmtHalfBath an then we categorize them with 1, 2, 3, or 4+ number of bath # - Impute missing values # - Either with most frequent value, the median or None # - Remove outliers # - Remove two properties with very high SF and low price # - Apply log transform to columns # - Encode feature # - Neighborhood # - MSZoning # - SaleType # - Binning of features # - OverallCond into two categories: good condition (>=5) and average condition (<5) # - GarageCars 0, 1, 2, 3+ # - Fireplaces 0, 1, 2+ # - Scale features (standard scaler) # - Remove feature (the remaining) # Preprocessing functions def remove_outliers(df): df.drop(df[df["GrLivArea"] > 4000].index, inplace=True) return df def get_TotalSF(df): df["TotalSF"] = df["1stFlrSF"] + df["2ndFlrSF"] + df["TotalBsmtSF"] return df def get_age(df): df["age"] = df["YrSold"] - df["YearBuilt"] return df def apply_label_encoder(df): cols = ( "FireplaceQu", "BsmtQual", "BsmtCond", "GarageQual", "GarageCond", "ExterQual", "ExterCond", "HeatingQC", "PoolQC", "KitchenQual", "BsmtFinType1", "BsmtFinType2", "Functional", "Fence", "BsmtExposure", "GarageFinish", "LandSlope", "LotShape", "PavedDrive", "Street", "Alley", "CentralAir", "OverallCond", "YrSold", "MoSold", ) for c in cols: lbl = LabelEncoder() lbl.fit(list(df[c].values)) df[c] = lbl.transform(list(df[c].values)) return df def reformat_categorical_data(df): df["MSSubClass"] = df["MSSubClass"].apply(str) df["OverallCond"] = df["OverallCond"].astype(str) df["YrSold"] = df["YrSold"].astype(str) df["MoSold"] = df["MoSold"].astype(str) return df def get_house_particularities(df): df["Has_shed"] = (df["MiscFeature"] == "Shed") * 1.0 df["Remodeled"] = (df["YearRemodAdd"] != df["YearBuilt"]) * 1 df["Recent_remodel"] = (df["YearRemodAdd"] == df["YrSold"]) * 1 df["Very_new_house"] = (df["YearBuilt"] == df["YrSold"]) * 1 df["Has_2nd_floor"] = (df["2ndFlrSF"] != 0) * 1 df["Has_pool"] = (df["PoolArea"] != 0) * 1 df["Number_of_porch"] = ( (df["OpenPorchSF"] != 0) * 1 + (df["EnclosedPorch"] != 0) * 1 + (df["3SsnPorch"] != 0) * 1 + (df["ScreenPorch"] != 0) * 1 ) df["Has_porch"] = (df["Number_of_porch"] != 0) * 1 df["Has_Wood_deck"] = (df["WoodDeckSF"] != 0) * 1 return df def get_Total_bath(df): df["Total_bath"] = ( df["FullBath"] + df["HalfBath"] + df["BsmtFullBath"] + df["BsmtHalfBath"] ) df.loc[df["Total_bath"] > 4, "Total_bath"] = 4 return df def normalize_SF_per_room(df): df["SF_per_BedroomAbvGr"] = df["TotalSF"] / df["BedroomAbvGr"] df["SF_per_TotRmsAbvGrd"] = df["TotalSF"] / df["TotRmsAbvGrd"] return df def get_decade(df): df["DecadeBuilt"] = (df["YearBuilt"] / 10).astype(int) * 10 df["DecadeRemodAdd"] = (df["YearRemodAdd"] / 10).astype(int) * 10 return df def get_estimation_OverallCond(df): for col in ["ExterCond", "BsmtCond", "HeatingQC", "GarageCond"]: df[col + "_"] = df[col].map( {"Ex": 9, "Gd": 7, "TA": 5, "Fa": 3, "NA": np.nan}, na_action="ignore" ) df["OverallCond_calculated_1"] = df[ ["ExterCond_", "BsmtCond_", "HeatingQC_", "GarageCond_"] ].mean(axis=1) df["OverallCond"] = df["OverallCond"].astype(float) df["OverallCond_calculated_2"] = ( 
df[["OverallCond_calculated_1", "OverallCond"]].mean(axis=1).astype(int) ) # df = df.drop(columns=['ExterCond_', 'BsmtCond_', 'HeatingQC_', 'GarageCond_'], axis=1) return df def get_estimation_bedrooms(df): df.loc[:, "bedrooms_ratio"] = df["BedroomAbvGr"] / df["TotRmsAbvGrd"] mask = ( ((df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4)) | ((df["bedrooms_ratio"] <= 0.33) & (df["TotRmsAbvGrd"] >= 5)) ) | ( ((df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5)) | ((df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4)) ) while len(df.loc[mask]) != 0: # small houses, this means that small house have more bedrooms than other room df.loc[ (df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4), "BedroomAbvGr" ] += 1 df.loc[ (df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4), "BedroomAbvGr" ] -= 1 # big houses df.loc[ (df["bedrooms_ratio"] < 0.33) & (df["TotRmsAbvGrd"] >= 5), "BedroomAbvGr" ] += 1 df.loc[ (df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5), "BedroomAbvGr" ] -= 1 # update value of bedroom ratio df.loc[:, "bedrooms_ratio"] = df["BedroomAbvGr"] / df["TotRmsAbvGrd"] mask = ( ((df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4)) | ((df["bedrooms_ratio"] <= 0.33) & (df["TotRmsAbvGrd"] >= 5)) ) | ( ((df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5)) | ((df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4)) ) return df def bin_MSZoning(df): df.loc[(df["MSZoning"] != "RL") & (df["MSZoning"] != "RM"), "MSZoning"] = "other" return df def bin_SaleType(df): df.loc[(df["SaleType"] != "WD") & (df["SaleType"] != "new"), "SaleType"] = "other" return df def bin_column(df, col, bins, labels): df[col] = pd.cut(df[col], bins=bins, labels=labels) df[col] = df[col].astype(float) return df def log_transform(df): numeric_features = df.dtypes[df.dtypes != "object"].index skewed = df[numeric_features].apply(lambda x: skew(x.dropna().astype(float))) skewed = skewed[skewed > 0.75] skewed = skewed.index df[skewed] = np.log1p(df[skewed]).astype(float) return df def impute_missing_data(df): df.fillna({"MSZoning": df["MSZoning"].mode().iloc[0]}, inplace=True) df.fillna({"SaleType": df["SaleType"].mode().iloc[0]}, inplace=True) df.fillna({"Electrical": df["Electrical"].mode()[0]}, inplace=True) df.fillna({"KitchenQual": df["KitchenQual"].mode()[0]}, inplace=True) df.fillna({"Exterior1st": df["Exterior1st"].mode()[0]}, inplace=True) df.fillna({"Exterior2nd": df["Exterior2nd"].mode()[0]}, inplace=True) df.loc[df["KitchenAbvGr"] == 0, "KitchenAbvGr"] = 1 df["LotFrontage"] = df.groupby("Neighborhood")["LotFrontage"].transform( lambda x: x.fillna(x.median()) ) df.fillna( { "GarageCars": 0, "BsmtFullBath": 0, "BsmtHalfBath": 0, "TotalBsmtSF": 0, "MSSubClass": "None", "HeatingQC": "None", "PoolQC": "None", "MiscFeature": "None", "Alley": "None", "Fence": "None", "FireplaceQu": "None", "GarageType": "None", "GarageFinish": "None", "GarageQual": "None", "GarageCond": "NA", "ExterCond": "NA", "GarageYrBlt": 0, "GarageArea": 0, "GarageCars": 0, "BsmtQual": "None", "BsmtCond": "NA", "BsmtFinType1": "None", "BsmtExposure": "None", "BsmtFinType2": "None", "MasVnrArea": 0, "MasVnrType": "None", "Functional": "Typ", }, inplace=True, ) return df def drop_cols(df): df = df.drop(columns=["Id"]) # df = df.drop(df.columns.difference(['MSZoning','SaleType', 'TotalSF', 'Total_bath', 'SF_per_BedroomAbvGr', 'SF_per_TotRmsAbvGrd', 'DecadeBuilt', # 'DecadeRemodAdd', 'OverallCond_calculated_2', 'GarageCars', 'OverallQual', 'Neighborhood', 'bedrooms_ratio', 'BedroomAbvGr', 'TotRmsAbvGrd', # 
'KitchenAbvGr', 'Fireplaces', 'age', 'Has_shed', 'Remodeled','Recent_remodel','Very_new_house','Has_2nd_floor','Number_of_porch', # 'Has_porch','Has_Wood_deck','Has_pool','SalePrice','','','','','']), 1, inplace=True) return df train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv") train = remove_outliers(train) combine = [train, test] for df in combine: df = impute_missing_data(df) # df = get_age(df) # df = get_house_particularities(df) # df = get_TotalSF(df) # df = get_Total_bath(df) # df = get_estimation_bedrooms(df) # df = normalize_SF_per_room(df) # df = get_decade(df) # df = get_estimation_OverallCond(df) df = reformat_categorical_data(df) # df = bin_MSZoning(df) # df = bin_SaleType(df) # df = bin_column(df, 'GarageCars', bins=[-0.5,0.5,1.5,2.5,10], labels=[0,1,2,3]) # df = bin_column(df, 'Fireplaces', bins=[-0.5,0.5,1.5,10], labels=[0,1,2]) # df = bin_column(df, 'Total_bath', bins=[-0.5,1.5,2.5,3.5,10], labels=[1,2,3,4]) # df = bin_column(df, 'BedroomAbvGr', bins=[-0.5,2.5,3.5,4.5,20], labels=[2,3,4,5]) # df = bin_column(df, 'TotRmsAbvGrd', bins=[-0.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,20], labels=[3,4,5,6,7,8,9,10]) df = log_transform(df) df = drop_cols(df) # df = impute_missing_data(df) (train.shape, test.shape) preprocessing = make_column_transformer( ( OneHotEncoder(handle_unknown="ignore", sparse=False), train.select_dtypes(include=["object", "category"]).columns.to_list(), ), remainder="passthrough", ) # # 5. Model Preparation # dataset_train = pd.read_csv("/kaggle/input/titanic/train.csv") y = np.ravel(np.array([train["SalePrice"]]).T) X = train.drop(columns=["SalePrice"]) X_pred = test.copy() folds = KFold(n_splits=10, shuffle=True, random_state=0) pipeline = Pipeline( [ ("preprocessing", preprocessing), ("scaler", RobustScaler()), # ('scaler' , StandardScaler()), # ('pca' , PCA(n_components=10)) ] ) pipeline.fit(X) X_preprocessed = pd.DataFrame(pipeline.transform(X)) X_preprocessed.shape X_pred_preprocessed = pd.DataFrame(pipeline.transform(X_pred)) X_pred_preprocessed = X_pred_preprocessed.fillna(X_pred_preprocessed.median()) X_pred_preprocessed.shape # # 6. 
Pipeline results
models_list = {
    "Ridge": Ridge(),
    "DecisionTree Regressor": DecisionTreeRegressor(),
    "Random Forest": RandomForestRegressor(),
    "SVR": SVR(),
    "LGBMRegressor": LGBMRegressor(verbosity=0, force_row_wise=True),
    "XGBRegressor": XGBRegressor(use_label_encoder=False, verbosity=0),
    "CatBoostRegressor": CatBoostRegressor(verbose=0),
    "Lasso": Lasso(alpha=0.0005),
    "KernelRidge": KernelRidge(),
    "ElasticNet": ElasticNet(alpha=0.0004, l1_ratio=0.9),
    "BayesianRidge": BayesianRidge(),
    "GradientBoostingRegressor": GradientBoostingRegressor(),
}
model_perf_matrix = []
for model_name, model in models_list.items():
    pipeline = Pipeline(
        [
            ("preprocessing", preprocessing),
            ("scaler", RobustScaler()),
            # ('scaler' , StandardScaler()),
            # ('pca' , PCA(n_components=10)),
            ("model", model),
        ]
    )
    cv_score = np.sqrt(
        -cross_val_score(pipeline, X, y, cv=folds, scoring="neg_mean_squared_error")
    )
    model_perf_matrix.append(
        [model_name, round(cv_score.mean(), 4), round(cv_score.std(), 4)]
    )
df_model_perf = pd.DataFrame(
    model_perf_matrix, columns=["Model", "Mean value", "Std value"]
)
df_model_perf
models_list = {
    "CatBoostRegressor": {
        "model": CatBoostRegressor(verbose=0),
        "param_grid": {"max_depth": [3, 4, 5, 6, 8], "n_estimators": [100, 200, 300]},
    },
    "BayesianRidge": {
        "model": BayesianRidge(),
        "param_grid": {
            "alpha_1": [1e-7, 1e-6],
            "alpha_2": [1e-6, 1e-5],
            "lambda_1": [1e-6, 1e-5],
            "lambda_2": [1e-7, 1e-6],
        },
    },
    "Ridge": {
        "model": Ridge(),
        "param_grid": {"alpha": [7, 10, 10.5, 11, 11.5, 12, 15]},
    },
    "Lasso": {
        "model": Lasso(),
        "param_grid": {"alpha": [1e-4, 0.00025, 0.0005, 0.00075, 1e-3]},
    },
    "ElasticNet": {
        "model": ElasticNet(),
        "param_grid": {
            "alpha": [0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006],
            "l1_ratio": [0.7, 0.8, 0.9, 1],  # l1_ratio must lie in [0, 1]
        },
    },
    "SVR": {
        "model": SVR(),
        "param_grid": {
            "gamma": [1e-5, 1e-4, 1e-3, 0.01],
            "C": [10, 50, 100, 150, 200],
            "epsilon": [1e-3, 0.005, 0.01, 0.2],
        },
    },
}
results = {}
for model_name, model in models_list.items():
    best_model = GridSearchCV(
        estimator=model["model"],
        param_grid=model["param_grid"],
        cv=folds,
        scoring="neg_mean_squared_error",
    )
    best_model.fit(X_preprocessed, y)
    print(model_name)
    print(best_model.best_params_)
    best_model.best_score_ = np.sqrt(-best_model.best_score_)
    print(
        "Mean score : ",
        best_model.best_score_,
        " Std : ",
        best_model.cv_results_["std_test_score"][best_model.best_index_],
    )
    model_results = {
        "estimator": best_model.best_estimator_,
        "best_params": best_model.best_params_,
        "mean": best_model.best_score_,
        "std": best_model.cv_results_["std_test_score"][best_model.best_index_],
    }
    results[model_name] = model_results
# ### The tuned models all have very similar scores, let's use all of them in an ensemble regressor
# create a dictionary of our models
estimators = []
for model_name, model in results.items():
    estimators.append((model_name, model["estimator"]))
# create our voting regressor, inputting our models
ensemble = VotingRegressor(estimators)
ensemble.fit(X_preprocessed, y)
# # 7. 
Prediction ensemble.predict(X_pred_preprocessed) y_pred = np.floor(np.expm1(ensemble.predict(X_pred_preprocessed))) output = pd.DataFrame( { "Id": pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/test.csv" ).Id, "SalePrice": y_pred, } ) # q1 = output['SalePrice'].quantile(0.005) # q2 = output['SalePrice'].quantile(0.995) # output['SalePrice'] = output['SalePrice'].apply(lambda x: x if x > q1 else x*0.77) # output['SalePrice'] = output['SalePrice'].apply(lambda x: x if x < q2 else x*1.1) output output.to_csv("my_submission.csv", index=False) print("Your submission was successfully saved!")
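# Why np.expm1 appears in the prediction step above (a short note with a minimal sketch):
# log_transform() applied np.log1p to the skewed numeric columns, and SalePrice is one of
# them, so the models were fit on log1p(SalePrice); np.expm1 is the exact inverse, mapping
# predictions back to dollars before np.floor rounds them down.
price = 200000.0
print(np.expm1(np.log1p(price)))  # recovers ~200000.0, up to floating-point rounding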
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/433/69433739.ipynb
null
null
[{"Id": 69433739, "ScriptId": 15563265, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6877454, "CreationDate": "07/31/2021 01:14:44", "VersionNumber": 8.0, "Title": "House price regression - Full EDA + ML workflow", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 842.0, "LinesInsertedFromPrevious": 53.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 789.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# # House price regression # ## Description of the notebook # This notebook is organized in 7 parts. # 1. Data acquisition: we will import the dataset and get a first glance at what it contains. # 2. Dataset exploratory analysis : we will analyze the data, get an understanding of the main features, of their types, if they have missing values... # 3. Features exploratory analysis : we will analyze the data,find the corelation between the features and the survival rate and decide which features to keep # 4. Data cleaning and feature selection : Select features that will be kept in the model and remove others, deal with missing values if there is any, create dummy variables for categories... # 5. Model preparation : prepare the train and test set and prepare the models we will use for the classification # 6. Pipeline evaluation and selection: We will run the models and get their scores, which will allow us the choose the best model. # 7. Predict : Final stage, we will run our final model to execute predictions. # # 1. Data acquisition import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression, SGDRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.svm import SVR from lightgbm import LGBMRegressor from xgboost.sklearn import XGBRegressor from catboost import CatBoostRegressor from sklearn.kernel_ridge import KernelRidge from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, Ridge from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import StandardScaler from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.compose import make_column_transformer from sklearn.preprocessing import FunctionTransformer from sklearn.decomposition import PCA from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.ensemble import VotingRegressor from scipy.stats import skew pd.set_option("display.max_rows", 500) pd.set_option("display.max_columns", 500) pd.set_option("display.width", 1000) sns.set_style("darkgrid") import warnings warnings.filterwarnings("ignore") dataset_train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) dataset_test = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/test.csv" ) # # 2. 
Dataset Exploratory analysis # In this section we will check the number and types of features for the dataset, if there is missing values, if there is corelation between some features, if we can remove some unnecessary features and if we can create new features from the ones we already have def screen_data(df): rows = [] for col in df.columns: rows.append([col, df[col].isnull().sum(), df[col].nunique(), df[col].dtypes]) return pd.DataFrame( rows, columns=["Col", "Missing values", "Unique values", "Type"] ) def screen_missing_values(df, ratio=0.1, verbose=0): total_rows = len(df) total_columns = len(df.columns) df = screen_data(df) df_m = df.loc[df["Missing values"] != 0] # .sort_values(['Type', 'Missing values']) total_missing_values = len(df_m) df_m.loc[:, "Percentage missing values"] = df_m["Missing values"] / total_rows missing_values_less10 = len(df_m.loc[df_m["Percentage missing values"] <= ratio]) missing_values_more10 = len(df_m.loc[df_m["Percentage missing values"] > ratio]) print( "Total missing values : ", total_missing_values, " --> ", round(100 * total_missing_values / total_columns, 0), "% of all features", ) print( "Columns with less than ", round(ratio * 100, 0), "% missing values : ", missing_values_less10, " --> ", round(100 * missing_values_less10 / total_missing_values, 0), "% of features with missing values", ) print( "Columns with more than ", round(ratio * 100, 0), "% missing values : ", missing_values_more10, " --> ", round(100 * missing_values_more10 / total_missing_values, 0), "% of features with missing values", ) if verbose >= 1: print("") print( df_m.drop(columns=["Unique values", "Type"]).loc[ df_m["Percentage missing values"] > ratio ] ) # ## 2.1 Dataframe information dataset_train.shape, dataset_test.shape # The train and the test set are the same size, so it's going to be important not to overfit the model. dataset_train.head(10) df = screen_data(dataset_train) df.T # ### Missing values screen_missing_values(dataset_train, verbose=1) screen_missing_values(dataset_test, verbose=1) # In the dataset test there is more columns with missing values, but most of them have less than 10% missing values. # The column with more than 10% missing values are the same in train and test. # The columns might have missing values to indicate an absence of pool or fireplace for example, we will have to analyse things a bit more to conclude. # ### Types of data print( "Categorical features : ", len( dataset_train.drop(columns=["Id", "SalePrice"]) .select_dtypes(include="object") .columns ), ) print( "Numerical features : ", len( dataset_train.drop(columns=["Id", "SalePrice"]) .select_dtypes(exclude="object") .columns ), ) print("Total features : ", len(dataset_train.drop(columns=["Id", "SalePrice"]).columns)) # A lot of numerical and categorical data # ## 2.2 Data description # Based on this very small analysis we can see that we have 79 different features. # In order to start analysing we will have to choose some feature we think might give us a good aproximation of the sale price. # It is not possible to carry an extensive EDA on all the features. # Let's if we can drop some feature which give us the same info (abs(corr) > 0.85), or that represent the same idea. # ## Let's check the document describing the data # ### Columns we will analyse # - MSZoning because there is probably a difference between an agriculture zone, an industrial zone and a residential zone. 
# - Lot area, which is the size of the lot # - Neighborhood, probably the price is not the same depending on the neighborhood. # - OverallQual & OverallCond # - YearBuilt & YearRemodAdd # - MoSold, YrSold: maybe the time of year has an impact on the price # - SaleType # - Bedroom & TotRmsAbvGrd # - TotalSF = TotalBsmtSF + 1stFlrSF + 2ndFlrSF # ### Columns we will transform # - OverallQual & OverallCond, we can probably aggregate thse columns together # - YearBuilt & YearRemodAdd # - Check if all these columns ExterQual, ExterCond, BsmtCond, BsmtFinType1, BsmtFinType2, HeatingQC, KitchenQual, GarageQual and GarageCond give us a good aproximation of overall quality and overall condition or if there was some errors during the calculation # - TotalSF = TotalBsmtSF + 1stFlrSF + 2ndFlrSF # - TotalBath = FullBath + HalfBath + BsmtFullBath + BsmtHalfBath # - TotalPorch = WoodDeckSF + openPorchSF + EnclosedPorch + 3SsnPorch + ScreenPorch # ### Columns we will probably drop # - LandSlope or LandContour, because they contain the same information # - The columns with a lot of missing data, unless they provide very valuable data and we can consider that missing data is just an abscence of pool or fireplace. # # 3. Feature exploratory analysis # Let's start with two categorical data that describe the surroundings of the house MSZoning and Neighborhood. fig, ax = plt.subplots(1, 2, figsize=(22, 5)) sns.boxplot(x="MSZoning", y="SalePrice", data=dataset_train, ax=ax[0]) ax[0].set_title("Price vs MSZoning") sns.countplot(x="MSZoning", data=dataset_train, ax=ax[1]) ax[1].set_title("Count per MSZoning") order = ( dataset_train.groupby("Neighborhood") .agg(np.mean) .sort_values("SalePrice") .index.to_list() ) fig, ax = plt.subplots(2, 1, figsize=(22, 10)) sns.boxplot(x="Neighborhood", y="SalePrice", data=dataset_train, order=order, ax=ax[0]) ax[0].set_title("Price vs Neighborhood") sns.countplot(x="Neighborhood", data=dataset_train, order=order, ax=ax[1]) ax[1].set_title("Price vs Neighborhood") order = ( dataset_train.groupby("Neighborhood") .agg(np.mean) .sort_values("SalePrice") .index.to_list() ) fig, ax = plt.subplots(1, 1, figsize=(22, 7)) sns.boxplot( x="Neighborhood", y="SalePrice", hue="MSZoning", data=dataset_train, order=order, ax=ax, ) ax.set_title("Price vs Neighborhood") # #### Things we can learn from the previous analysis # - The Neighborhood seems to have a big impact on the price # - Inside a neighborhood, the price can change depending on the zone for example in Crawfor. # ### Let's continue witht he size of the house # We will check the lot size and the size of the house. 
dataset_train["TotalSF"] = ( dataset_train["1stFlrSF"] + dataset_train["2ndFlrSF"] + dataset_train["TotalBsmtSF"] ) fig, ax = plt.subplots(2, 3, figsize=(22, 10)) sns.scatterplot(x="LotArea", y="SalePrice", data=dataset_train, ax=ax[0][0]) ax[0][0].set_title("SalePrice vs LotArea") sns.scatterplot(x="TotalSF", y="SalePrice", data=dataset_train, ax=ax[0][1]) ax[0][1].set_title("SalePrice vs TotalSF") sns.scatterplot( x="GrLivArea", y="SalePrice", data=dataset_train, ax=ax[0][2] ) # LowQualFinSF or BsmtUnfSF ax[0][2].set_title("SalePrice vs GrLivArea") sns.scatterplot(x="1stFlrSF", y="SalePrice", data=dataset_train, ax=ax[1][0]) ax[1][0].set_title("SalePrice vs 1stFlrSF") sns.scatterplot(x="2ndFlrSF", y="SalePrice", data=dataset_train, ax=ax[1][1]) ax[1][1].set_title("SalePrice vs 2ndFlrSF") sns.scatterplot(x="TotalBsmtSF", y="SalePrice", data=dataset_train, ax=ax[1][2]) ax[1][2].set_title("SalePrice vs TotalBsmtSF") # #### Things we can learn from the previous analysis # - The lot area is not much correlated to the price, but maybe we can transform this column to get a better correlation (log?) # - The 1stFlrSF, 2ndFlrSF and TotalBsmtSF are correlated to the price but show an increase of the variance when the price increases. # - Summing all these values together give us a better correlation, with some outliers that we might have to remove or transform # ### Let's check if the overall quality and condition of the house have an impact on the price order_qual = ( dataset_train.groupby("OverallQual") .agg(np.mean) .sort_values("SalePrice") .index.to_list() ) order_cond = ( dataset_train.groupby("OverallCond") .agg(np.mean) .sort_values("SalePrice") .index.to_list() ) fig, ax = plt.subplots(2, 1, figsize=(22, 10)) sns.boxplot( x="OverallQual", y="SalePrice", data=dataset_train, order=order_qual, ax=ax[0] ) ax[0].set_title("Price vs OverallQual") sns.boxplot( x="OverallCond", y="SalePrice", data=dataset_train, order=order_qual, ax=ax[1] ) ax[1].set_title("Price vs OverallCond") # #### Things we can learn from the previous analysis # - The overall quality seems a good indicator of the price. # - The overall condition seems less good an indicator of the price, maybe with some binning we could get a better price indicator. # ### Let's see what we can learn from the construction year and remodeling year # order = dataset_train.groupby('Neighborhood').agg(np.mean).sort_values('SalePrice').index.to_list() fig, ax = plt.subplots(1, 1, figsize=(22, 7)) sns.boxplot(x="YearBuilt", y="SalePrice", data=dataset_train, ax=ax) ax.set_title("Price vs YearBuilt") dataset_train["DecadeBuilt"] = dataset_train["YearBuilt"] / 10 dataset_train["DecadeBuilt"] = dataset_train["DecadeBuilt"].astype(int) * 10 # dataset_train[['YearBuilt', 'DecadeBuilt']] fig, ax = plt.subplots(1, 1, figsize=(22, 7)) sns.boxplot(x="DecadeBuilt", y="SalePrice", data=dataset_train, ax=ax) ax.set_title("Price vs YearBuilt") # #### Things we can learn from the previous analysis # - The price of recent houses is a bit higher than houses between 1900 and 1970. # - House in the 1880's and the 1890's are also a bit more expensive. 
fig, ax = plt.subplots(1, 1, figsize=(22, 7)) sns.boxplot(x="YearRemodAdd", y="SalePrice", data=dataset_train, ax=ax) ax.set_title("Price vs YearRemodAdd") dataset_train["DecadeRemodAdd"] = dataset_train["YearRemodAdd"] / 10 dataset_train["DecadeRemodAdd"] = dataset_train["DecadeRemodAdd"].astype(int) * 10 # dataset_train[['YearBuilt', 'DecadeBuilt']] fig, ax = plt.subplots(1, 1, figsize=(22, 7)) sns.boxplot(x="DecadeRemodAdd", y="SalePrice", data=dataset_train, ax=ax) ax.set_title("Price vs DecadeRemodAdd") # #### Things we can learn from the previous analysis # - The price of houses that were remodeled very recently (2010) is also a bit higher than the rest. # - There seem to be a small increase of the price the more recent the remodelation was done # ### Let's see what we can learn from the number of room fig, ax = plt.subplots(1, 2, figsize=(22, 7)) sns.boxplot(x="BedroomAbvGr", y="SalePrice", data=dataset_train, ax=ax[0]) ax[0].set_title("Price vs number of bedrooms") sns.boxplot(x="TotRmsAbvGrd", y="SalePrice", data=dataset_train, ax=ax[1]) ax[1].set_title("Price vs number of rooms") # #### What can we learn from the previous analysis # - The number of rooms is correlated to the price, which was to be expected # - The number of bedroom presents something interesting. # - A house with 0 rooms above ground has a higher average price than houses with 1, 2 or 3 bedrooms above ground # - We need to dig a bit more into the houses with 0 bedroom. # - 1st idea, the number of rooms was badly indicated and we need to estimate it based on other features # - 2nd idea, the rooms are actually in the basement. # ### Let's dig a bit more in the houses with 0 bedroom above ground df_0bd = dataset_train.loc[dataset_train["BedroomAbvGr"] == 0] df_0bd[ [ "YearBuilt", "BldgType", "BsmtFinType1", "BsmtFinType2", "TotalBsmtSF", "GrLivArea", "TotRmsAbvGrd", "BsmtFullBath", "BsmtHalfBath", "FullBath", "HalfBath", "KitchenAbvGr", "SalePrice", ] ].head(20) df_0bd = dataset_test.loc[dataset_train["BedroomAbvGr"] == 0] df_0bd[ [ "YearBuilt", "BldgType", "BsmtFinType1", "BsmtFinType2", "TotalBsmtSF", "GrLivArea", "TotRmsAbvGrd", "BsmtFullBath", "BsmtHalfBath", "FullBath", "HalfBath", "KitchenAbvGr", ] ].head(20) # Actually there is only a few houses without bedrooms above ground, but some have a very high number of rooms. # There is probably a way of estimating the number of bedrooms # ### Let's check if the conditions of the sales and the time of the sale have an impact on the price fig, ax = plt.subplots(2, 3, figsize=(22, 10)) sns.boxplot(x="SaleType", y="SalePrice", data=dataset_train, ax=ax[0][0]) ax[0][0].set_title("Price vs SaleType") sns.boxplot(x="MoSold", y="SalePrice", data=dataset_train, ax=ax[0][1]) ax[0][1].set_title("Price vs month of sale") sns.boxplot(x="YrSold", y="SalePrice", data=dataset_train, ax=ax[0][2]) ax[0][2].set_title("Price vs year of sale") sns.countplot(x="SaleType", data=dataset_train, ax=ax[1][0]) ax[1][0].set_title("Price vs SaleType") sns.countplot(x="MoSold", data=dataset_train, ax=ax[1][1]) ax[1][1].set_title("Price vs month of sale") sns.countplot(x="YrSold", data=dataset_train, ax=ax[1][2]) ax[1][2].set_title("Price vs year of sale") # The year and month of sale does not seem to have an impact on the price. # The sale type seems to have an impact but outside of new houses I don't think it is usefull to consider other types of sale because of the population size which is very small. # # 4. 
Data cleaning and feature selection # Let's start by creating some new columns as we stated in the column analysis. # ## 4.1 Feature creation # ### Columns we will transform # - OverallQual & OverallCond, we can probably aggregate these columns together # - YearBuilt & YearRemodAdd # - Check if all these columns ExterQual, ExterCond, BsmtCond, BsmtFinType1, BsmtFinType2, HeatingQC, KitchenQual, GarageQual and GarageCond give us a good aproximation of overall quality and overall condition or if there was some errors during the calculation # - TotalSF = TotalBsmtSF + 1stFlrSF + 2ndFlrSF # - TotalBath = FullBath + HalfBath + BsmtFullBath + BsmtHalfBath # - TotalPorch = WoodDeckSF + openPorchSF + EnclosedPorch + 3SsnPorch + ScreenPorch # #### Let's start with a study of the quality and condition of the house dataset_train[ [ "OverallQual", "ExterQual", "BsmtQual", "HeatingQC", "FireplaceQu", "GarageQual", "PoolQC", ] ] dataset_train[["OverallCond", "ExterCond", "BsmtCond", "HeatingQC", "GarageCond"]] # There is differences of OverallCond between houses with the same score for ExterCond, BsmtCond, HeatingQC, GarageCond. # We could recalculate a new score for the overall condition. for col in ["ExterCond", "BsmtCond", "HeatingQC", "GarageCond"]: dataset_train[col] = dataset_train[col].map( {"Ex": 9, "Gd": 7, "TA": 5, "Fa": 3, "NA": np.nan}, na_action="ignore" ) dataset_train["OverallCond_calculated"] = dataset_train[ ["ExterCond", "BsmtCond", "HeatingQC", "GarageCond"] ].mean(axis=1) dataset_train["OverallCond"] = dataset_train["OverallCond"].astype(float) df = dataset_train.loc[ abs(dataset_train["OverallCond"] - dataset_train["OverallCond_calculated"]) > 1 ] dataset_train["OverallCond_calculated"] = dataset_train[ "OverallCond_calculated" ].astype(int) dataset_train["OverallCond_calculated_2"] = ( dataset_train[["OverallCond_calculated", "OverallCond"]].mean(axis=1).astype(int) ) fig, ax = plt.subplots(3, 1, figsize=(22, 15)) sns.boxplot(x="OverallCond", y="SalePrice", data=dataset_train, ax=ax[0]) ax[0].set_title("Price vs OverallQual") sns.boxplot(x="OverallCond_calculated", y="SalePrice", data=dataset_train, ax=ax[1]) ax[1].set_title("Price vs OverallCond calculated") sns.boxplot(x="OverallCond_calculated_2", y="SalePrice", data=dataset_train, ax=ax[2]) ax[2].set_title("Price vs OverallCond calculated 2") # It seems that by averaging the overall condition score and our calculated condition score and binning the result into two categories: # good condition (>=5) and average condition (<5) we could get a better feature. # #### Let's now check if the calculus of square feet is proprerly made. # TotalSF = TotalBsmtSF + 1stFlrSF + 2ndFlrSF + BsmtUnfSF + LowQualFinSF dataset_train[ [ "TotalSF", "GrLivArea", "TotalBsmtSF", "1stFlrSF", "2ndFlrSF", "BsmtUnfSF", "LowQualFinSF", ] ] # We can see that it doesn't include the basement SF which in my opinion is a mistake, for example on row number 1458, the house has half its surface in the basement. # There must be some value in the surface. 
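# #### Optional check of how the surface columns add up
# A small sketch, not part of the original notebook, to back up the remark above. The
# assumption being tested is that GrLivArea covers the above-ground surface only
# (1stFlrSF + 2ndFlrSF + LowQualFinSF), while the TotalSF column we built also adds
# the basement surface; the print reports how many rows break that assumption.
above_ground = (
    dataset_train["1stFlrSF"] + dataset_train["2ndFlrSF"] + dataset_train["LowQualFinSF"]
)
mismatches = (dataset_train["GrLivArea"] != above_ground).sum()
print(f"Rows where GrLivArea != 1stFlrSF + 2ndFlrSF + LowQualFinSF: {mismatches}")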
# #### Let's see if we can estimathe number of bedrooms len(dataset_train.loc[dataset_train["BedroomAbvGr"] == 0]), len( dataset_test.loc[dataset_test["BedroomAbvGr"] == 0] ) dataset_train.loc[dataset_train["BedroomAbvGr"] == 0] # df = dataset_train.loc[abs(dataset_train['TotRmsAbvGrd'] - dataset_train['KitchenAbvGr'] - dataset_train['BedroomAbvGr']) >= 4] df = dataset_train.copy() df["bedrooms_ratio"] = dataset_train["BedroomAbvGr"] / dataset_train["TotRmsAbvGrd"] df[["TotRmsAbvGrd", "KitchenAbvGr", "BedroomAbvGr", "bedrooms_ratio"]].describe() df.loc[ (df["bedrooms_ratio"] <= 0.2) | (df["bedrooms_ratio"] >= 0.8), ["TotRmsAbvGrd", "KitchenAbvGr", "BedroomAbvGr", "bedrooms_ratio"], ].head(20) fig, ax = plt.subplots(1, 2, figsize=(22, 7)) sns.histplot(x="bedrooms_ratio", data=df, stat="probability", ax=ax[0]) ax[0].set_title("Count of bedrooms_ratio") sns.scatterplot(x="bedrooms_ratio", y="TotRmsAbvGrd", data=df, ax=ax[1]) ax[1].set_title("TotRmsAbvGrd vs bedrooms ratio") # Let's try to increase the have a bedroom ratio between 0.5 and 0.8 or 0.6 for big houses. # We will increase or decrease the number of bedrooms until we reach a satisfactory ratio. mask = ( ((df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4)) | ((df["bedrooms_ratio"] <= 0.33) & (df["TotRmsAbvGrd"] >= 5)) ) | ( ((df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 6)) | ((df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 5)) ) df.loc[mask, ["TotRmsAbvGrd", "KitchenAbvGr", "BedroomAbvGr", "bedrooms_ratio"]].head( 20 ) # Let's first put a kitchen in every house df.loc[df["KitchenAbvGr"] == 0, "KitchenAbvGr"] = 1 mask = ( ((df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4)) | ((df["bedrooms_ratio"] <= 0.33) & (df["TotRmsAbvGrd"] >= 5)) ) | ( ((df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5)) | ((df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4)) ) while len(df.loc[mask]) != 0: # small houses, this means that small house have more bedrooms than other room df.loc[ (df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4), "BedroomAbvGr" ] += 1 df.loc[ (df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4), "BedroomAbvGr" ] -= 1 # big houses df.loc[ (df["bedrooms_ratio"] < 0.33) & (df["TotRmsAbvGrd"] >= 5), "BedroomAbvGr" ] += 1 df.loc[ (df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5), "BedroomAbvGr" ] -= 1 # update value of bedroom ratio df.loc[:, "bedrooms_ratio"] = df["BedroomAbvGr"] / df["TotRmsAbvGrd"] mask = ( ((df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4)) | ((df["bedrooms_ratio"] <= 0.33) & (df["TotRmsAbvGrd"] >= 5)) ) | ( ((df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5)) | ((df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4)) ) df[["TotRmsAbvGrd", "KitchenAbvGr", "BedroomAbvGr", "bedrooms_ratio"]].head(30) df[["TotRmsAbvGrd", "KitchenAbvGr", "BedroomAbvGr", "bedrooms_ratio"]].describe() fig, ax = plt.subplots(2, 2, figsize=(22, 15)) sns.boxplot(x="BedroomAbvGr", y="SalePrice", data=df, ax=ax[0][0]) ax[0][0].set_title("Price vs number of bedrooms after normalization") sns.boxplot(x="BedroomAbvGr", y="SalePrice", data=dataset_train, ax=ax[0][1]) ax[0][1].set_title("Price vs number of bedrooms before normalization") sns.countplot(x="BedroomAbvGr", data=df, ax=ax[1][0]) ax[1][0].set_title("Count of number of bedrooms after normalization") sns.countplot(x="BedroomAbvGr", data=dataset_train, ax=ax[1][1]) ax[1][1].set_title("Count of number of bedrooms before normalization") # We almost didn't change the distribution of bedrooms in the houses but now we 
have a more logical relation between the number of bedrooms and SalePrice.

# #### Let's now see if we can normalize the number of rooms and bedrooms by the size of the house.
dataset_train["SF_per_BedroomAbvGr"] = (
    dataset_train["TotalSF"] / dataset_train["BedroomAbvGr"]
)
dataset_train["SF_per_TotRmsAbvGrd_per_sf"] = (
    dataset_train["TotalSF"] / dataset_train["TotRmsAbvGrd"]
)
fig, ax = plt.subplots(1, 2, figsize=(22, 7))
sns.scatterplot(x="SF_per_BedroomAbvGr", y="SalePrice", data=dataset_train, ax=ax[0])
ax[0].set_title("Price vs SF_per_BedroomAbvGr")
sns.scatterplot(
    x="SF_per_TotRmsAbvGrd_per_sf", y="SalePrice", data=dataset_train, ax=ax[1]
)
ax[1].set_title("Price vs SF_per_TotRmsAbvGrd_per_sf")

# #### Let's check the total number of baths
dataset_train["Total_bath"] = dataset_train[
    ["FullBath", "HalfBath", "BsmtFullBath", "BsmtHalfBath"]
].sum(axis=1)
fig, ax = plt.subplots(1, 2, figsize=(22, 7))
sns.boxplot(x="Total_bath", y="SalePrice", data=dataset_train, ax=ax[0])
ax[0].set_title("Price vs total number of baths")
sns.countplot(x="Total_bath", data=dataset_train, ax=ax[1])
ax[1].set_title("Count of Total_bath")

# We could bin the total number of baths into 1, 2, 3 and 4+ categories.

# #### Let's see if the total porch area is a good estimator of the price of the house.
dataset_train["Total_porch_SF"] = dataset_train[
    ["WoodDeckSF", "OpenPorchSF", "EnclosedPorch", "3SsnPorch", "ScreenPorch"]
].sum(axis=1)
fig, ax = plt.subplots(1, 2, figsize=(22, 7))
sns.scatterplot(x="Total_porch_SF", y="SalePrice", data=dataset_train, ax=ax[0])
ax[0].set_title("Price vs Total_porch_SF")

# The total size of the porch does not seem to be a good estimator of the SalePrice.

# #### Let's look at the garage size
fig, ax = plt.subplots(1, 3, figsize=(22, 7))
sns.boxplot(x="GarageCars", y="SalePrice", data=dataset_train, ax=ax[0])
ax[0].set_title("Price vs GarageCars")
sns.scatterplot(x="GarageArea", y="SalePrice", data=dataset_train, ax=ax[1])
ax[1].set_title("Price vs GarageArea")
sns.countplot(x="GarageCars", data=dataset_train, ax=ax[2])
ax[2].set_title("Count of GarageCars")

# As with Total_bath, we could use 0, 1, 2 and 3+ categories for GarageCars.

# #### Let's take a closer look at the features that don't appear often, like the pool, fireplaces and Misc features, and their value
fig, ax = plt.subplots(2, 3, figsize=(22, 12))
sns.boxplot(x="MiscFeature", y="SalePrice", data=dataset_train, ax=ax[0][0])
ax[0][0].set_title("Price vs MiscFeature")
sns.scatterplot(x="PoolArea", y="SalePrice", data=dataset_train, ax=ax[0][1])
ax[0][1].set_title("Price vs PoolArea")
sns.scatterplot(x="MiscVal", y="SalePrice", data=dataset_train, ax=ax[0][2])
ax[0][2].set_title("Price vs MiscVal")
sns.boxplot(x="Fireplaces", y="SalePrice", data=dataset_train, ax=ax[1][0])
ax[1][0].set_title("Price vs Fireplaces")
sns.boxplot(x="FireplaceQu", y="SalePrice", data=dataset_train, ax=ax[1][1])
ax[1][1].set_title("Price vs FireplaceQu")
sns.countplot(x="Fireplaces", data=dataset_train, ax=ax[1][2])
ax[1][2].set_title("Count of Fireplaces")

# Taking the Misc features into account does not seem worthwhile.
# Maybe we could include the number of fireplaces.
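# #### Sketch of the capping suggested above
# A short optional illustration, not part of the original analysis, of the binning
# ideas mentioned in the comments: GarageCars as 0/1/2/3+, Fireplaces as 0/1/2+ and
# Total_bath as 1/2/3/4+, done with clip() on a copy so the exploration dataframe is
# left untouched.
capped = dataset_train[["GarageCars", "Fireplaces", "Total_bath"]].copy()
capped["GarageCars"] = capped["GarageCars"].clip(upper=3)  # 3 means "3 or more"
capped["Fireplaces"] = capped["Fireplaces"].clip(upper=2)  # 2 means "2 or more"
capped["Total_bath"] = capped["Total_bath"].clip(lower=1, upper=4)  # 4 means "4 or more"
for col in ["GarageCars", "Fireplaces", "Total_bath"]:
    print(capped[col].value_counts().sort_index(), "\n")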
# ## 4.2 Checking for missing data # For now the features we are considering are : # - MSZoning, Neighborhood # - OverallQual & OverallCond, ExterCond, BsmtCond, HeatingQC, GarageCond # - YearBuilt & YearRemodAdd # - SaleType # - Bedroom & TotRmsAbvGrd, FullBath + HalfBath + BsmtFullBath + BsmtHalfBath, Kitchen # - TotalSF # - GarageCars # dataset_train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) dataset_test = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/test.csv" ) train = dataset_train[ [ "MSZoning", "Neighborhood", "OverallQual", "OverallCond", "ExterCond", "BsmtCond", "HeatingQC", "GarageCond", "YearBuilt", "YearRemodAdd", "SaleType", "BedroomAbvGr", "TotRmsAbvGrd", "FullBath", "HalfBath", "BsmtFullBath", "BsmtHalfBath", "KitchenAbvGr", "GrLivArea", "GarageCars", ] ] train.isna().sum() train.describe() test = dataset_test[ [ "MSZoning", "Neighborhood", "OverallQual", "OverallCond", "ExterCond", "BsmtCond", "HeatingQC", "GarageCond", "YearBuilt", "YearRemodAdd", "SaleType", "BedroomAbvGr", "TotRmsAbvGrd", "FullBath", "HalfBath", "BsmtFullBath", "BsmtHalfBath", "KitchenAbvGr", "GrLivArea", "GarageCars", ] ] test.isna().sum() # ## 4.3 Checking for similar distribution in the train and test datasets def hist_train_test(train, test, col_type="numerical"): train["type"] = "train" test["type"] = "test" total = pd.concat([test, train]) if col_type == "numerical": cols = total.select_dtypes(include=np.number).columns.to_list() if col_type == "categorical": cols = total.select_dtypes(include=["object", "category"]).columns.to_list() nb_rows = int(round(0.9 + (len(cols) / 3), 0)) i = j = 0 fig, ax = plt.subplots(nb_rows, 3, figsize=(22, nb_rows * 6)) for col in cols: if col_type == "numerical": ax[i][j] = sns.histplot( x=col, data=total, hue="type", multiple="dodge", bins=20, ax=ax[i][j] ) if col_type == "categorical": ax[i][j] = sns.countplot(x=col, data=total, hue="type", ax=ax[i][j]) if j == 2: i = i + 1 j = (j + 1) % 3 hist_train_test(train, test, "numerical") hist_train_test(train, test, "categorical") # The distribution of the data between train set and test set is very close. # We don't need to modify our data. 
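# #### Optional: a more quantitative train/test comparison
# The histograms above are judged by eye; as a complementary check, not part of the
# original notebook, a two-sample Kolmogorov-Smirnov test can flag numerical columns
# whose train and test distributions differ noticeably. This assumes scipy is
# available in the environment.
from scipy.stats import ks_2samp

for col in train.select_dtypes(include=np.number).columns:
    stat, pvalue = ks_2samp(train[col].dropna(), test[col].dropna())
    if pvalue < 0.01:
        print(f"{col}: KS statistic = {stat:.3f}, p-value = {pvalue:.4f}")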
# ## 4.4 Preprocessing pipeline # Steps to include in the preprocessing pipeline: # - Feature engineering # - Calculate TotalSF # - Create a square feet per room and square feet per bedroom feature # - Create a BuiltDecade and RemodallDecade # - Recalculate the number of bedrooms # - MSZoning, put the three smallest categories together in other # - SaleType, put the smallest categories together in other # - Total_bath = FullBath + HalfBath + BsmtFullBath + BsmtHalfBath an then we categorize them with 1, 2, 3, or 4+ number of bath # - Impute missing values # - Either with most frequent value, the median or None # - Remove outliers # - Remove two properties with very high SF and low price # - Apply log transform to columns # - Encode feature # - Neighborhood # - MSZoning # - SaleType # - Binning of features # - OverallCond into two categories: good condition (>=5) and average condition (<5) # - GarageCars 0, 1, 2, 3+ # - Fireplaces 0, 1, 2+ # - Scale features (standard scaler) # - Remove feature (the remaining) # Preprocessing functions def remove_outliers(df): df.drop(df[df["GrLivArea"] > 4000].index, inplace=True) return df def get_TotalSF(df): df["TotalSF"] = df["1stFlrSF"] + df["2ndFlrSF"] + df["TotalBsmtSF"] return df def get_age(df): df["age"] = df["YrSold"] - df["YearBuilt"] return df def apply_label_encoder(df): cols = ( "FireplaceQu", "BsmtQual", "BsmtCond", "GarageQual", "GarageCond", "ExterQual", "ExterCond", "HeatingQC", "PoolQC", "KitchenQual", "BsmtFinType1", "BsmtFinType2", "Functional", "Fence", "BsmtExposure", "GarageFinish", "LandSlope", "LotShape", "PavedDrive", "Street", "Alley", "CentralAir", "OverallCond", "YrSold", "MoSold", ) for c in cols: lbl = LabelEncoder() lbl.fit(list(df[c].values)) df[c] = lbl.transform(list(df[c].values)) return df def reformat_categorical_data(df): df["MSSubClass"] = df["MSSubClass"].apply(str) df["OverallCond"] = df["OverallCond"].astype(str) df["YrSold"] = df["YrSold"].astype(str) df["MoSold"] = df["MoSold"].astype(str) return df def get_house_particularities(df): df["Has_shed"] = (df["MiscFeature"] == "Shed") * 1.0 df["Remodeled"] = (df["YearRemodAdd"] != df["YearBuilt"]) * 1 df["Recent_remodel"] = (df["YearRemodAdd"] == df["YrSold"]) * 1 df["Very_new_house"] = (df["YearBuilt"] == df["YrSold"]) * 1 df["Has_2nd_floor"] = (df["2ndFlrSF"] != 0) * 1 df["Has_pool"] = (df["PoolArea"] != 0) * 1 df["Number_of_porch"] = ( (df["OpenPorchSF"] != 0) * 1 + (df["EnclosedPorch"] != 0) * 1 + (df["3SsnPorch"] != 0) * 1 + (df["ScreenPorch"] != 0) * 1 ) df["Has_porch"] = (df["Number_of_porch"] != 0) * 1 df["Has_Wood_deck"] = (df["WoodDeckSF"] != 0) * 1 return df def get_Total_bath(df): df["Total_bath"] = ( df["FullBath"] + df["HalfBath"] + df["BsmtFullBath"] + df["BsmtHalfBath"] ) df.loc[df["Total_bath"] > 4, "Total_bath"] = 4 return df def normalize_SF_per_room(df): df["SF_per_BedroomAbvGr"] = df["TotalSF"] / df["BedroomAbvGr"] df["SF_per_TotRmsAbvGrd"] = df["TotalSF"] / df["TotRmsAbvGrd"] return df def get_decade(df): df["DecadeBuilt"] = (df["YearBuilt"] / 10).astype(int) * 10 df["DecadeRemodAdd"] = (df["YearRemodAdd"] / 10).astype(int) * 10 return df def get_estimation_OverallCond(df): for col in ["ExterCond", "BsmtCond", "HeatingQC", "GarageCond"]: df[col + "_"] = df[col].map( {"Ex": 9, "Gd": 7, "TA": 5, "Fa": 3, "NA": np.nan}, na_action="ignore" ) df["OverallCond_calculated_1"] = df[ ["ExterCond_", "BsmtCond_", "HeatingQC_", "GarageCond_"] ].mean(axis=1) df["OverallCond"] = df["OverallCond"].astype(float) df["OverallCond_calculated_2"] = ( 
df[["OverallCond_calculated_1", "OverallCond"]].mean(axis=1).astype(int) ) # df = df.drop(columns=['ExterCond_', 'BsmtCond_', 'HeatingQC_', 'GarageCond_'], axis=1) return df def get_estimation_bedrooms(df): df.loc[:, "bedrooms_ratio"] = df["BedroomAbvGr"] / df["TotRmsAbvGrd"] mask = ( ((df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4)) | ((df["bedrooms_ratio"] <= 0.33) & (df["TotRmsAbvGrd"] >= 5)) ) | ( ((df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5)) | ((df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4)) ) while len(df.loc[mask]) != 0: # small houses, this means that small house have more bedrooms than other room df.loc[ (df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4), "BedroomAbvGr" ] += 1 df.loc[ (df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4), "BedroomAbvGr" ] -= 1 # big houses df.loc[ (df["bedrooms_ratio"] < 0.33) & (df["TotRmsAbvGrd"] >= 5), "BedroomAbvGr" ] += 1 df.loc[ (df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5), "BedroomAbvGr" ] -= 1 # update value of bedroom ratio df.loc[:, "bedrooms_ratio"] = df["BedroomAbvGr"] / df["TotRmsAbvGrd"] mask = ( ((df["bedrooms_ratio"] < 0.5) & (df["TotRmsAbvGrd"] <= 4)) | ((df["bedrooms_ratio"] <= 0.33) & (df["TotRmsAbvGrd"] >= 5)) ) | ( ((df["bedrooms_ratio"] >= 0.7) & (df["TotRmsAbvGrd"] >= 5)) | ((df["bedrooms_ratio"] >= 0.8) & (df["TotRmsAbvGrd"] <= 4)) ) return df def bin_MSZoning(df): df.loc[(df["MSZoning"] != "RL") & (df["MSZoning"] != "RM"), "MSZoning"] = "other" return df def bin_SaleType(df): df.loc[(df["SaleType"] != "WD") & (df["SaleType"] != "new"), "SaleType"] = "other" return df def bin_column(df, col, bins, labels): df[col] = pd.cut(df[col], bins=bins, labels=labels) df[col] = df[col].astype(float) return df def log_transform(df): numeric_features = df.dtypes[df.dtypes != "object"].index skewed = df[numeric_features].apply(lambda x: skew(x.dropna().astype(float))) skewed = skewed[skewed > 0.75] skewed = skewed.index df[skewed] = np.log1p(df[skewed]).astype(float) return df def impute_missing_data(df): df.fillna({"MSZoning": df["MSZoning"].mode().iloc[0]}, inplace=True) df.fillna({"SaleType": df["SaleType"].mode().iloc[0]}, inplace=True) df.fillna({"Electrical": df["Electrical"].mode()[0]}, inplace=True) df.fillna({"KitchenQual": df["KitchenQual"].mode()[0]}, inplace=True) df.fillna({"Exterior1st": df["Exterior1st"].mode()[0]}, inplace=True) df.fillna({"Exterior2nd": df["Exterior2nd"].mode()[0]}, inplace=True) df.loc[df["KitchenAbvGr"] == 0, "KitchenAbvGr"] = 1 df["LotFrontage"] = df.groupby("Neighborhood")["LotFrontage"].transform( lambda x: x.fillna(x.median()) ) df.fillna( { "GarageCars": 0, "BsmtFullBath": 0, "BsmtHalfBath": 0, "TotalBsmtSF": 0, "MSSubClass": "None", "HeatingQC": "None", "PoolQC": "None", "MiscFeature": "None", "Alley": "None", "Fence": "None", "FireplaceQu": "None", "GarageType": "None", "GarageFinish": "None", "GarageQual": "None", "GarageCond": "NA", "ExterCond": "NA", "GarageYrBlt": 0, "GarageArea": 0, "GarageCars": 0, "BsmtQual": "None", "BsmtCond": "NA", "BsmtFinType1": "None", "BsmtExposure": "None", "BsmtFinType2": "None", "MasVnrArea": 0, "MasVnrType": "None", "Functional": "Typ", }, inplace=True, ) return df def drop_cols(df): df = df.drop(columns=["Id"]) # df = df.drop(df.columns.difference(['MSZoning','SaleType', 'TotalSF', 'Total_bath', 'SF_per_BedroomAbvGr', 'SF_per_TotRmsAbvGrd', 'DecadeBuilt', # 'DecadeRemodAdd', 'OverallCond_calculated_2', 'GarageCars', 'OverallQual', 'Neighborhood', 'bedrooms_ratio', 'BedroomAbvGr', 'TotRmsAbvGrd', # 
'KitchenAbvGr', 'Fireplaces', 'age', 'Has_shed', 'Remodeled','Recent_remodel','Very_new_house','Has_2nd_floor','Number_of_porch', # 'Has_porch','Has_Wood_deck','Has_pool','SalePrice','','','','','']), 1, inplace=True) return df train = pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/train.csv" ) test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv") train = remove_outliers(train) combine = [train, test] for df in combine: df = impute_missing_data(df) # df = get_age(df) # df = get_house_particularities(df) # df = get_TotalSF(df) # df = get_Total_bath(df) # df = get_estimation_bedrooms(df) # df = normalize_SF_per_room(df) # df = get_decade(df) # df = get_estimation_OverallCond(df) df = reformat_categorical_data(df) # df = bin_MSZoning(df) # df = bin_SaleType(df) # df = bin_column(df, 'GarageCars', bins=[-0.5,0.5,1.5,2.5,10], labels=[0,1,2,3]) # df = bin_column(df, 'Fireplaces', bins=[-0.5,0.5,1.5,10], labels=[0,1,2]) # df = bin_column(df, 'Total_bath', bins=[-0.5,1.5,2.5,3.5,10], labels=[1,2,3,4]) # df = bin_column(df, 'BedroomAbvGr', bins=[-0.5,2.5,3.5,4.5,20], labels=[2,3,4,5]) # df = bin_column(df, 'TotRmsAbvGrd', bins=[-0.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,20], labels=[3,4,5,6,7,8,9,10]) df = log_transform(df) df = drop_cols(df) # df = impute_missing_data(df) (train.shape, test.shape) preprocessing = make_column_transformer( ( OneHotEncoder(handle_unknown="ignore", sparse=False), train.select_dtypes(include=["object", "category"]).columns.to_list(), ), remainder="passthrough", ) # # 5. Model Preparation # dataset_train = pd.read_csv("/kaggle/input/titanic/train.csv") y = np.ravel(np.array([train["SalePrice"]]).T) X = train.drop(columns=["SalePrice"]) X_pred = test.copy() folds = KFold(n_splits=10, shuffle=True, random_state=0) pipeline = Pipeline( [ ("preprocessing", preprocessing), ("scaler", RobustScaler()), # ('scaler' , StandardScaler()), # ('pca' , PCA(n_components=10)) ] ) pipeline.fit(X) X_preprocessed = pd.DataFrame(pipeline.transform(X)) X_preprocessed.shape X_pred_preprocessed = pd.DataFrame(pipeline.transform(X_pred)) X_pred_preprocessed = X_pred_preprocessed.fillna(X_pred_preprocessed.median()) X_pred_preprocessed.shape # # 6. 
Pipeline results
models_list = {
    "Ridge": Ridge(),
    "DecisionTree Regressor": DecisionTreeRegressor(),
    "Random Forest": RandomForestRegressor(),
    "SVR": SVR(),
    "LGBMRegressor": LGBMRegressor(verbosity=0, force_row_wise=True),
    "XGBRegressor": XGBRegressor(use_label_encoder=False, verbosity=0),
    "CatBoostRegressor": CatBoostRegressor(verbose=0),
    "Lasso": Lasso(alpha=0.0005),
    "KernelRidge": KernelRidge(),
    "ElasticNet": ElasticNet(alpha=0.0004, l1_ratio=0.9),
    "BayesianRidge": BayesianRidge(),
    "GradientBoostingRegressor": GradientBoostingRegressor(),
}
model_perf_matrix = []
for model_name, model in models_list.items():
    pipeline = Pipeline(
        [
            ("preprocessing", preprocessing),
            ("scaler", RobustScaler()),
            # ('scaler' , StandardScaler()),
            # ('pca' , PCA(n_components=10)),
            ("model", model),
        ]
    )
    cv_score = np.sqrt(
        -cross_val_score(pipeline, X, y, cv=folds, scoring="neg_mean_squared_error")
    )
    model_perf_matrix.append(
        [model_name, round(cv_score.mean(), 4), round(cv_score.std(), 4)]
    )
df_model_perf = pd.DataFrame(
    model_perf_matrix, columns=["Model", "Mean value", "Std value"]
)
df_model_perf

models_list = {
    "CatBoostRegressor": {
        "model": CatBoostRegressor(verbose=0),
        "param_grid": {"max_depth": [3, 4, 5, 6, 8], "n_estimators": [100, 200, 300]},
    },
    "BayesianRidge": {
        "model": BayesianRidge(),
        "param_grid": {
            "alpha_1": [1e-7, 1e-6],
            "alpha_2": [1e-6, 1e-5],
            "lambda_1": [1e-6, 1e-5],
            "lambda_2": [1e-7, 1e-6],
        },
    },
    "Ridge": {
        "model": Ridge(),
        "param_grid": {"alpha": [7, 10, 10.5, 11, 11.5, 12, 15]},
    },
    "Lasso": {
        "model": Lasso(),
        "param_grid": {"alpha": [1e-4, 0.00025, 0.0005, 0.00075, 1e-3]},
    },
    "ElasticNet": {
        "model": ElasticNet(),
        "param_grid": {
            "alpha": [0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006],
            "l1_ratio": [0.7, 0.8, 0.9, 1.0],  # l1_ratio must lie between 0 and 1
        },
    },
    "SVR": {
        "model": SVR(),
        "param_grid": {
            "gamma": [1e-5, 1e-4, 1e-3, 0.01],
            "C": [10, 50, 100, 150, 200],
            "epsilon": [1e-3, 0.005, 0.01, 0.2],
        },
    },
}
results = {}
for model_name, model in models_list.items():
    best_model = GridSearchCV(
        estimator=model["model"],
        param_grid=model["param_grid"],
        cv=folds,
        scoring="neg_mean_squared_error",
    )
    best_model.fit(X_preprocessed, y)
    print(model_name)
    print(best_model.best_params_)
    best_model.best_score_ = np.sqrt(-best_model.best_score_)
    print(
        "Mean score : ",
        best_model.best_score_,
        " Std : ",
        best_model.cv_results_["std_test_score"][best_model.best_index_],
    )
    model_results = {
        "estimator": best_model.best_estimator_,
        "best_params": best_model.best_params_,
        "mean": best_model.best_score_,
        "std": best_model.cv_results_["std_test_score"][best_model.best_index_],
    }
    results[model_name] = model_results

# ### The six tuned models all have very similar scores, let's use them together in an ensemble regressor
# collect the tuned models as (name, estimator) pairs
estimators = []
for model_name, model in results.items():
    estimators.append((model_name, model["estimator"]))
# create our voting regressor from the tuned models
ensemble = VotingRegressor(estimators)
ensemble.fit(X_preprocessed, y)

# # 7.
Prediction ensemble.predict(X_pred_preprocessed) y_pred = np.floor(np.expm1(ensemble.predict(X_pred_preprocessed))) output = pd.DataFrame( { "Id": pd.read_csv( "/kaggle/input/house-prices-advanced-regression-techniques/test.csv" ).Id, "SalePrice": y_pred, } ) # q1 = output['SalePrice'].quantile(0.005) # q2 = output['SalePrice'].quantile(0.995) # output['SalePrice'] = output['SalePrice'].apply(lambda x: x if x > q1 else x*0.77) # output['SalePrice'] = output['SalePrice'].apply(lambda x: x if x < q2 else x*1.1) output output.to_csv("my_submission.csv", index=False) print("Your submission was successfully saved!")
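# #### Optional sanity check on the ensemble
# The ensemble is fitted and used for prediction without reporting its own score; a
# quick hedged check, not part of the original notebook, is to look at its
# cross-validated RMSE on the preprocessed training data, so it can be compared with
# the individual model scores printed earlier. Like those scores, this RMSE is
# measured on the log-transformed SalePrice.
ensemble_cv = np.sqrt(
    -cross_val_score(
        ensemble, X_preprocessed, y, cv=folds, scoring="neg_mean_squared_error"
    )
)
print(f"Ensemble CV RMSE: {ensemble_cv.mean():.4f} (+/- {ensemble_cv.std():.4f})")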
false
0
14,822
0
14,822
14,822
69433636
<jupyter_start><jupyter_text>Formula 1 World Championship (1950 - 2023) ### Context Formula 1 (a.k.a. F1 or Formula One) is the highest class of single-seater auto racing sanctioned by the Fédération Internationale de l'Automobile (FIA) and owned by the Formula One Group. The FIA Formula One World Championship has been one of the premier forms of racing around the world since its inaugural season in 1950. The word "formula" in the name refers to the set of rules to which all participants' cars must conform. A Formula One season consists of a series of races, known as Grands Prix, which take place worldwide on purpose-built circuits and on public roads. ### Content The dataset consists of all information on the Formula 1 races, drivers, constructors, qualifying, circuits, lap times, pit stops, championships from 1950 till the latest 2021 season. Kaggle dataset identifier: formula-1-world-championship-1950-2020 <jupyter_code>import pandas as pd df = pd.read_csv('formula-1-world-championship-1950-2020/races.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 1101 entries, 0 to 1100 Data columns (total 18 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 raceId 1101 non-null int64 1 year 1101 non-null int64 2 round 1101 non-null int64 3 circuitId 1101 non-null int64 4 name 1101 non-null object 5 date 1101 non-null object 6 time 1101 non-null object 7 url 1101 non-null object 8 fp1_date 1101 non-null object 9 fp1_time 1101 non-null object 10 fp2_date 1101 non-null object 11 fp2_time 1101 non-null object 12 fp3_date 1101 non-null object 13 fp3_time 1101 non-null object 14 quali_date 1101 non-null object 15 quali_time 1101 non-null object 16 sprint_date 1101 non-null object 17 sprint_time 1101 non-null object dtypes: int64(4), object(14) memory usage: 155.0+ KB <jupyter_text>Examples: { "raceId": 1, "year": 2009, "round": 1, "circuitId": 1, "name": "Australian Grand Prix", "date": "2009-03-29 00:00:00", "time": "06:00:00", "url": "http://en.wikipedia.org/wiki/2009_Australian_Grand_Prix", "fp1_date": "\\N", "fp1_time": "\\N", "fp2_date": "\\N", "fp2_time": "\\N", "fp3_date": "\\N", "fp3_time": "\\N", "quali_date": "\\N", "quali_time": "\\N", "sprint_date": "\\N", "sprint_time": "\\N" } { "raceId": 2, "year": 2009, "round": 2, "circuitId": 2, "name": "Malaysian Grand Prix", "date": "2009-04-05 00:00:00", "time": "09:00:00", "url": "http://en.wikipedia.org/wiki/2009_Malaysian_Grand_Prix", "fp1_date": "\\N", "fp1_time": "\\N", "fp2_date": "\\N", "fp2_time": "\\N", "fp3_date": "\\N", "fp3_time": "\\N", "quali_date": "\\N", "quali_time": "\\N", "sprint_date": "\\N", "sprint_time": "\\N" } { "raceId": 3, "year": 2009, "round": 3, "circuitId": 17, "name": "Chinese Grand Prix", "date": "2009-04-19 00:00:00", "time": "07:00:00", "url": "http://en.wikipedia.org/wiki/2009_Chinese_Grand_Prix", "fp1_date": "\\N", "fp1_time": "\\N", "fp2_date": "\\N", "fp2_time": "\\N", "fp3_date": "\\N", "fp3_time": "\\N", "quali_date": "\\N", "quali_time": "\\N", "sprint_date": "\\N", "sprint_time": "\\N" } { "raceId": 4, "year": 2009, "round": 4, "circuitId": 3, "name": "Bahrain Grand Prix", "date": "2009-04-26 00:00:00", "time": "12:00:00", "url": "http://en.wikipedia.org/wiki/2009_Bahrain_Grand_Prix", "fp1_date": "\\N", "fp1_time": "\\N", "fp2_date": "\\N", "fp2_time": "\\N", "fp3_date": "\\N", "fp3_time": "\\N", "quali_date": "\\N", "quali_time": "\\N", "sprint_date": "\\N", "sprint_time": "\\N" } <jupyter_code>import pandas as pd df = 
pd.read_csv('formula-1-world-championship-1950-2020/constructors.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 211 entries, 0 to 210 Data columns (total 5 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 constructorId 211 non-null int64 1 constructorRef 211 non-null object 2 name 211 non-null object 3 nationality 211 non-null object 4 url 211 non-null object dtypes: int64(1), object(4) memory usage: 8.4+ KB <jupyter_text>Examples: { "constructorId": 1, "constructorRef": "mclaren", "name": "McLaren", "nationality": "British", "url": "http://en.wikipedia.org/wiki/McLaren" } { "constructorId": 2, "constructorRef": "bmw_sauber", "name": "BMW Sauber", "nationality": "German", "url": "http://en.wikipedia.org/wiki/BMW_Sauber" } { "constructorId": 3, "constructorRef": "williams", "name": "Williams", "nationality": "British", "url": "http://en.wikipedia.org/wiki/Williams_Grand_Prix_Engineering" } { "constructorId": 4, "constructorRef": "renault", "name": "Renault", "nationality": "French", "url": "http://en.wikipedia.org/wiki/Renault_in_Formula_One" } <jupyter_code>import pandas as pd df = pd.read_csv('formula-1-world-championship-1950-2020/results.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 26080 entries, 0 to 26079 Data columns (total 18 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 resultId 26080 non-null int64 1 raceId 26080 non-null int64 2 driverId 26080 non-null int64 3 constructorId 26080 non-null int64 4 number 26080 non-null object 5 grid 26080 non-null int64 6 position 26080 non-null object 7 positionText 26080 non-null object 8 positionOrder 26080 non-null int64 9 points 26080 non-null float64 10 laps 26080 non-null int64 11 time 26080 non-null object 12 milliseconds 26080 non-null object 13 fastestLap 26080 non-null object 14 rank 26080 non-null object 15 fastestLapTime 26080 non-null object 16 fastestLapSpeed 26080 non-null object 17 statusId 26080 non-null int64 dtypes: float64(1), int64(8), object(9) memory usage: 3.6+ MB <jupyter_text>Examples: { "resultId": 1, "raceId": 18, "driverId": 1, "constructorId": 1, "number": 22, "grid": 1, "position": 1, "positionText": 1, "positionOrder": 1, "points": 10, "laps": 58, "time": "1:34:50.616", "milliseconds": 5690616, "fastestLap": 39, "rank": 2, "fastestLapTime": "1:27.452", "fastestLapSpeed": 218.3, "statusId": 1 } { "resultId": 2, "raceId": 18, "driverId": 2, "constructorId": 2, "number": 3, "grid": 5, "position": 2, "positionText": 2, "positionOrder": 2, "points": 8, "laps": 58, "time": "+5.478", "milliseconds": 5696094, "fastestLap": 41, "rank": 3, "fastestLapTime": "1:27.739", "fastestLapSpeed": 217.586, "statusId": 1 } { "resultId": 3, "raceId": 18, "driverId": 3, "constructorId": 3, "number": 7, "grid": 7, "position": 3, "positionText": 3, "positionOrder": 3, "points": 6, "laps": 58, "time": "+8.163", "milliseconds": 5698779, "fastestLap": 41, "rank": 5, "fastestLapTime": "1:28.090", "fastestLapSpeed": 216.719, "statusId": 1 } { "resultId": 4, "raceId": 18, "driverId": 4, "constructorId": 4, "number": 5, "grid": 11, "position": 4, "positionText": 4, "positionOrder": 4, "points": 5, "laps": 58, "time": "+17.181", "milliseconds": 5707797, "fastestLap": 58, "rank": 7, "fastestLapTime": "1:28.603", "fastestLapSpeed": 215.464, "statusId": 1 } <jupyter_code>import pandas as pd df = pd.read_csv('formula-1-world-championship-1950-2020/drivers.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> 
RangeIndex: 857 entries, 0 to 856 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 driverId 857 non-null int64 1 driverRef 857 non-null object 2 number 857 non-null object 3 code 857 non-null object 4 forename 857 non-null object 5 surname 857 non-null object 6 dob 857 non-null object 7 nationality 857 non-null object 8 url 857 non-null object dtypes: int64(1), object(8) memory usage: 60.4+ KB <jupyter_text>Examples: { "driverId": 1, "driverRef": "hamilton", "number": "44", "code": "HAM", "forename": "Lewis", "surname": "Hamilton", "dob": "1985-01-07", "nationality": "British", "url": "http://en.wikipedia.org/wiki/Lewis_Hamilton" } { "driverId": 2, "driverRef": "heidfeld", "number": "\\N", "code": "HEI", "forename": "Nick", "surname": "Heidfeld", "dob": "1977-05-10", "nationality": "German", "url": "http://en.wikipedia.org/wiki/Nick_Heidfeld" } { "driverId": 3, "driverRef": "rosberg", "number": "6", "code": "ROS", "forename": "Nico", "surname": "Rosberg", "dob": "1985-06-27", "nationality": "German", "url": "http://en.wikipedia.org/wiki/Nico_Rosberg" } { "driverId": 4, "driverRef": "alonso", "number": "14", "code": "ALO", "forename": "Fernando", "surname": "Alonso", "dob": "1981-07-29", "nationality": "Spanish", "url": "http://en.wikipedia.org/wiki/Fernando_Alonso" } <jupyter_script># # 1\. Descrição e metódo de cálculo # A ideia é analisar os dados de todos os anos da Formula 1 e definir o Campeão de todos os tempos. # Será utilizada uma base de dados de 1950 até 2020, porém o sistema de pontuação será utilizado do ano de 2020, conforme lista abaixo. # 1. 25 pontos; # 2. 18 pontos; # 3. 15 pontos; # 4. 12 pontos; # 5. 10 pontos; # 6. 8 pontos; # 7. 6 pontos; # 8. 4 pontos; # 9. 2 pontos; # 10. 1 ponto. # - Não será utilizado o ponto extra de volta mais rápida, pois o dataset esta incompleto. # # 2\. Montagem da Estrutura # Importação de Bibliotecas e Dataset # ## 2.1. Importação de Bibliotecas import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # ## 2.2. Carga de Dados df_drivers = pd.read_csv( "../input/formula-1-world-championship-1950-2020/drivers.csv", na_values="\\N" ) df_teams = pd.read_csv( "../input/formula-1-world-championship-1950-2020/constructors.csv", na_values="\\N" ) df_results = pd.read_csv( "../input/formula-1-world-championship-1950-2020/results.csv", na_values="\\N" ) df_races = pd.read_csv( "../input/formula-1-world-championship-1950-2020/races.csv", na_values="\\N" ) # # 3\. Exploração dos Dados # Visualização e tratamento dos dados do Dataset, além de separar os dados que serão utilizados. # ## 3.1. Visualização dos dados # Pilotos/Competidores df_drivers.head(n=20) # Times/Equipes df_teams.head(n=20) # Corridas df_races.head(n=20) # Resultados df_results.head(n=20) # ## 3.2. Tratamento dos Dados # Montagem da tabela principal de trabalho. # Geração dos data frames apenas com as colunas e dados que interessa. col_list_drivers = ["driverId", "forename", "surname", "code"] df_drivers_cl = df_drivers.filter(col_list_drivers, axis=1) col_list_teams = ["constructorId", "name"] df_teams_cl = df_teams.filter(col_list_teams, axis=1) col_list_races = ["raceId", "year", "round"] df_races_cl = df_races.filter(col_list_races, axis=1) # Além de selecionar as colunas que fazem sentido para a analise, selecionei apenas as linhas em que o piloto ficou entre as 10 primeiras colocações. 
col_list_results = [ "raceId", "driverId", "constructorId", "position", ] df_results_cl = df_results[df_results["position"] < 11].filter(col_list_results, axis=1) # Verificação dos data frames criados anteriormente print("Drivers") print(df_drivers_cl.head(n=5)) print("") print("Teams") print(df_teams_cl.head(n=5)) print("") print("Races") print(df_races_cl.head(n=5)) print("") print("Results") print(df_results_cl.head(n=20)) # # 4\. Análise dos dados # Analise dos dados e levantamento dos primeiros dados conforme metodo de calculo. # ## 4.1. Quantidade de Etapas por Temporada # O calculo será feito por etapas, em um primeiro momento é importante separar a quantidade de etapas de cada temporada. # Visualização da quantidade de Etapas por ano sns.set_theme(style="whitegrid") fig, axis = plt.subplots(1, 1, figsize=(20, 5), sharex=True) df_races_plot = df_races_cl["year"].value_counts().reset_index() df_races_plot.columns = ["Year", "Races"] df_races_plot.rename(columns={"Year": "Races"}) df_races_plot.sort_values(by=["Year"], inplace=True) df_races_plot.sort_values(by=["Year"]) graph = sns.barplot(x=df_races_plot["Year"], y=df_races_plot["Races"], palette="crest") graph.set(title="Qty races per Year", xlabel="Years", ylabel="Races") graph.set_xticklabels(labels=graph.get_xticklabels(), rotation=90) max = df_races_plot.describe() * 1.1 max_y = 0 _, max_y_f = graph.get_ylim() max_y = max_y_f if max_y_f > max_y else max_y graph.set(ylim=(0, max_y)) fig.show() df_races_plot.transpose() # ## 4.2. Total de pontos por temporada # Visualização do total de pontos de cada piloto e equipe por temporada. # Mudança dos valores de position e rank de Float para Int float_to_int = lambda value: int(value) test_values = [10.0, 55.0, 2.0, 6.0] values_int = list(map(float_to_int, test_values)) print(test_values) print(values_int) df_results_cl.dtypes df_results_cl["position"] = df_results_cl["position"].apply(float_to_int) # df_results_cl['rank'] = df_results_cl['rank'].apply(float_to_int) df_results_cl.dtypes # Adição da coluna com quantidade de pontos points = [1, 25, 18, 15, 12, 10, 8, 6, 4, 2, 1] df_results_cl["points"] = 0 df_results_cl["year"] = 0 for index, result in df_results_cl.iterrows(): result["points"] = points[result["position"]] for index, result in df_results_cl.iterrows(): raceId = int(result["raceId"]) year = int(df_races_cl[df_races_cl["raceId"] == raceId]["year"]) result["year"] = year df_results_cl # Criação da listagem de anos years = [] for year in df_races_cl["year"].sort_values(): if year not in years: years.append(year) print(years) # Criação do dataset de pilotos x anos x pontos que será utilizado nos próximos graficos df_drivers_years_points = df_results_cl.filter( ["driverId", "constructorId", "points", "year"], axis=1 ) points_drivers_array = [] points_teams_array = [] for year in years: points_year = df_drivers_years_points[df_drivers_years_points["year"] == year] for index, driver in df_drivers_cl.iterrows(): array = [] array.extend([driver["forename"], driver["surname"], driver["code"]]) array.append(year) array.append( points_year[points_year["driverId"] == driver["driverId"]].sum()["points"] ) points_drivers_array.append(array) for index, team in df_teams_cl.iterrows(): array = [] array.extend([team["name"], year]) array.append( points_year[points_year["constructorId"] == team["constructorId"]].sum()[ "points" ] ) points_teams_array.append(array) df_drivers_points = pd.DataFrame( points_drivers_array, columns=("forename", "surname", "code", "year", "points") ) 
df_teams_points = pd.DataFrame(points_teams_array, columns=("name", "year", "points")) # Visualização do dataframe dos piltos. Altere o valor de 1992 com o ano desejado. view_drivers = df_drivers_points[df_drivers_points["year"] == 1992] view_drivers = view_drivers[view_drivers["points"] > 0] view_drivers[view_drivers["year"] == 1992].sort_values(by=["points"], ascending=False) # Visualização do dataframe das equipes. Altere o valor de 1992 com o ano desejado. view_teams = df_teams_points[df_teams_points["year"] == 1992] view_teams = view_teams[view_teams["points"] > 0] view_teams[view_teams["year"] == 1992].sort_values(by=["points"], ascending=False) # Criação dos gráficos sns.set_theme(style="whitegrid") axis = 0 max_y = 0 columns = 2 rows = int(len(years) / columns) view, axes = plt.subplots(rows, columns, figsize=(30, 300), sharex=False) plt.subplots_adjust(hspace=0.4) axes = axes.flatten() for year in years: plot = df_drivers_points[df_drivers_points["year"] == year] plot = plot[plot["points"] > 0].sort_values(by=["points"], ascending=False, axis=0) fig = sns.barplot( x=plot["forename"], y=plot["points"], ax=axes[axis], ci=None, palette="crest" ) fig.set(title=f"Standings of F1 {year}", xlabel="Drivers", ylabel="Points") fig.set_xticklabels(labels=fig.get_xticklabels(), rotation=90) axis += 1 max = plot.describe() * 1.1 _, max_y_f = fig.get_ylim() max_y = max_y_f if max_y_f > max_y else max_y fig.set(ylim=(0, max_y)) view.show() # Criação dos gráficos sns.set_theme(style="whitegrid") axis = 0 max_y = 0 columns = 2 rows = int(len(years) / columns) view, axes = plt.subplots(rows, columns, figsize=(30, 300), sharex=False) plt.subplots_adjust(hspace=0.4) axes = axes.flatten() for year in years: plot = df_teams_points[df_teams_points["year"] == year] plot = plot[plot["points"] > 0].sort_values(by=["points"], ascending=False, axis=0) fig = sns.barplot( x=plot["name"], y=plot["points"], ax=axes[axis], ci=None, palette="crest" ) fig.set(title=f"Standings of F1 {year}", xlabel="Teams", ylabel="Points") fig.set_xticklabels(labels=fig.get_xticklabels(), rotation=90) axis += 1 max = plot.describe() * 1.1 _, max_y_f = fig.get_ylim() max_y = max_y_f if max_y_f > max_y else max_y fig.set(ylim=(0, max_y)) view.show()
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/433/69433636.ipynb
formula-1-world-championship-1950-2020
rohanrao
[{"Id": 69433636, "ScriptId": 18860552, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7954566, "CreationDate": "07/31/2021 01:11:06", "VersionNumber": 3.0, "Title": "Formula 1 - Data Analysis", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 262.0, "LinesInsertedFromPrevious": 63.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 199.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92595477, "KernelVersionId": 69433636, "SourceDatasetVersionId": 2409119}]
[{"Id": 2409119, "DatasetId": 468218, "DatasourceVersionId": 2451205, "CreatorUserId": 113389, "LicenseName": "CC0: Public Domain", "CreationDate": "07/09/2021 08:59:30", "VersionNumber": 13.0, "Title": "Formula 1 World Championship (1950 - 2023)", "Slug": "formula-1-world-championship-1950-2020", "Subtitle": "F1 race data from 1950 to 2023", "Description": "### Context\nFormula 1 (a.k.a. F1 or Formula One) is the highest class of single-seater auto racing sanctioned by the F\u00e9d\u00e9ration Internationale de l'Automobile (FIA) and owned by the Formula One Group. The FIA Formula One World Championship has been one of the premier forms of racing around the world since its inaugural season in 1950. The word \"formula\" in the name refers to the set of rules to which all participants' cars must conform. A Formula One season consists of a series of races, known as Grands Prix, which take place worldwide on purpose-built circuits and on public roads.\n\n### Content\nThe dataset consists of all information on the Formula 1 races, drivers, constructors, qualifying, circuits, lap times, pit stops, championships from 1950 till the latest 2021 season.\n\n### Acknowledgements\nThe data is compiled from http://ergast.com/mrd/\n\n### Inspiration\n\"Races are won at the track. Championships are won at the factory.\" - Mercedes (2019)\n\nWith the amount of data being captured, analyzed and used to design, build and drive the Formula 1 cars is astounding. It is a global sport being followed by millions of people worldwide and it is very fascinating to see drivers pushing their limit in these vehicles to become the fastest racers in the world!", "VersionNotes": "updated till 7th July, 2021", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 468218, "CreatorUserId": 113389, "OwnerUserId": 113389.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5119555.0, "CurrentDatasourceVersionId": 5190912.0, "ForumId": 481134, "Type": 2, "CreationDate": "01/07/2020 17:47:49", "LastActivityDate": "01/07/2020", "TotalViews": 313044, "TotalDownloads": 55234, "TotalVotes": 1278, "TotalKernels": 84}]
[{"Id": 113389, "UserName": "rohanrao", "DisplayName": "Vopani", "RegisterDate": "07/09/2013", "PerformanceTier": 4}]
# # 1\. Description and calculation method
# The idea is to analyse the data from every Formula 1 season and determine the all-time Champion.
# The database covers 1950 to 2020, but the 2020 scoring system will be used, as listed below.
# 1. 25 points;
# 2. 18 points;
# 3. 15 points;
# 4. 12 points;
# 5. 10 points;
# 6. 8 points;
# 7. 6 points;
# 8. 4 points;
# 9. 2 points;
# 10. 1 point.
# - The extra point for the fastest lap will not be used, because that part of the dataset is incomplete.

# # 2\. Setting up the structure
# Importing the libraries and the dataset

# ## 2.1. Importing libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# ## 2.2. Loading the data
df_drivers = pd.read_csv(
    "../input/formula-1-world-championship-1950-2020/drivers.csv", na_values="\\N"
)
df_teams = pd.read_csv(
    "../input/formula-1-world-championship-1950-2020/constructors.csv", na_values="\\N"
)
df_results = pd.read_csv(
    "../input/formula-1-world-championship-1950-2020/results.csv", na_values="\\N"
)
df_races = pd.read_csv(
    "../input/formula-1-world-championship-1950-2020/races.csv", na_values="\\N"
)

# # 3\. Data exploration
# Viewing and preparing the dataset, and selecting the data that will be used.

# ## 3.1. Viewing the data
# Drivers/Competitors
df_drivers.head(n=20)
# Teams
df_teams.head(n=20)
# Races
df_races.head(n=20)
# Results
df_results.head(n=20)

# ## 3.2. Data preparation
# Building the main working table.
# Creating data frames with only the columns and data of interest.
col_list_drivers = ["driverId", "forename", "surname", "code"]
df_drivers_cl = df_drivers.filter(col_list_drivers, axis=1)
col_list_teams = ["constructorId", "name"]
df_teams_cl = df_teams.filter(col_list_teams, axis=1)
col_list_races = ["raceId", "year", "round"]
df_races_cl = df_races.filter(col_list_races, axis=1)

# Besides selecting the columns that make sense for the analysis, I kept only the rows
# where the driver finished among the first 10 positions.
col_list_results = [
    "raceId",
    "driverId",
    "constructorId",
    "position",
]
df_results_cl = df_results[df_results["position"] < 11].filter(col_list_results, axis=1)

# Checking the data frames created above
print("Drivers")
print(df_drivers_cl.head(n=5))
print("")
print("Teams")
print(df_teams_cl.head(n=5))
print("")
print("Races")
print(df_races_cl.head(n=5))
print("")
print("Results")
print(df_results_cl.head(n=20))

# # 4\. Data analysis
# Analysis of the data and first figures according to the calculation method.

# ## 4.1. Number of rounds per season
# The calculation will be done per round, so first it is important to determine the number of rounds in each season.
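# A small optional cross-check, not part of the original notebook: the number of
# rounds per season can also be read straight from the "round" column, and should
# match the race counts plotted below.
rounds_per_season = df_races_cl.groupby("year")["round"].max()
print(rounds_per_season.head(10))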
# Number of rounds per year
sns.set_theme(style="whitegrid")
fig, axis = plt.subplots(1, 1, figsize=(20, 5), sharex=True)
df_races_plot = df_races_cl["year"].value_counts().reset_index()
df_races_plot.columns = ["Year", "Races"]
df_races_plot.rename(columns={"Year": "Races"})
df_races_plot.sort_values(by=["Year"], inplace=True)
df_races_plot.sort_values(by=["Year"])
graph = sns.barplot(x=df_races_plot["Year"], y=df_races_plot["Races"], palette="crest")
graph.set(title="Qty races per Year", xlabel="Years", ylabel="Races")
graph.set_xticklabels(labels=graph.get_xticklabels(), rotation=90)
max = df_races_plot.describe() * 1.1
max_y = 0
_, max_y_f = graph.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
graph.set(ylim=(0, max_y))
fig.show()
df_races_plot.transpose()

# ## 4.2. Total points per season
# View of the total points of each driver and team per season.

# Converting the position and rank values from float to int
float_to_int = lambda value: int(value)
test_values = [10.0, 55.0, 2.0, 6.0]
values_int = list(map(float_to_int, test_values))
print(test_values)
print(values_int)
df_results_cl.dtypes
df_results_cl["position"] = df_results_cl["position"].apply(float_to_int)
# df_results_cl['rank'] = df_results_cl['rank'].apply(float_to_int)
df_results_cl.dtypes

# Adding the columns with the number of points and the season year.
# Note: iterrows() returns copies of the rows, so writing to them would not update the
# dataframe; vectorized assignments are used instead.
points = [1, 25, 18, 15, 12, 10, 8, 6, 4, 2, 1]  # index 0 is never used: positions run from 1 to 10
df_results_cl["points"] = df_results_cl["position"].apply(lambda position: points[position])
df_results_cl["year"] = df_results_cl["raceId"].map(df_races_cl.set_index("raceId")["year"])
df_results_cl

# Creating the list of years
years = []
for year in df_races_cl["year"].sort_values():
    if year not in years:
        years.append(year)
print(years)

# Creating the drivers x years x points dataset that will be used in the next charts
df_drivers_years_points = df_results_cl.filter(
    ["driverId", "constructorId", "points", "year"], axis=1
)
points_drivers_array = []
points_teams_array = []
for year in years:
    points_year = df_drivers_years_points[df_drivers_years_points["year"] == year]
    for index, driver in df_drivers_cl.iterrows():
        array = []
        array.extend([driver["forename"], driver["surname"], driver["code"]])
        array.append(year)
        array.append(
            points_year[points_year["driverId"] == driver["driverId"]].sum()["points"]
        )
        points_drivers_array.append(array)
    for index, team in df_teams_cl.iterrows():
        array = []
        array.extend([team["name"], year])
        array.append(
            points_year[points_year["constructorId"] == team["constructorId"]].sum()[
                "points"
            ]
        )
        points_teams_array.append(array)
df_drivers_points = pd.DataFrame(
    points_drivers_array, columns=("forename", "surname", "code", "year", "points")
)
df_teams_points = pd.DataFrame(points_teams_array, columns=("name", "year", "points"))

# Viewing the drivers dataframe. Replace 1992 with the desired year.
view_drivers = df_drivers_points[df_drivers_points["year"] == 1992]
view_drivers = view_drivers[view_drivers["points"] > 0]
view_drivers[view_drivers["year"] == 1992].sort_values(by=["points"], ascending=False)

# Viewing the teams dataframe. Replace 1992 with the desired year.
view_teams = df_teams_points[df_teams_points["year"] == 1992]
view_teams = view_teams[view_teams["points"] > 0]
view_teams[view_teams["year"] == 1992].sort_values(by=["points"], ascending=False)

# Creating the charts (drivers' standings per season)
sns.set_theme(style="whitegrid")
axis = 0
max_y = 0
columns = 2
rows = int(len(years) / columns)
view, axes = plt.subplots(rows, columns, figsize=(30, 300), sharex=False)
plt.subplots_adjust(hspace=0.4)
axes = axes.flatten()
for year in years:
    plot = df_drivers_points[df_drivers_points["year"] == year]
    plot = plot[plot["points"] > 0].sort_values(by=["points"], ascending=False, axis=0)
    fig = sns.barplot(
        x=plot["forename"], y=plot["points"], ax=axes[axis], ci=None, palette="crest"
    )
    fig.set(title=f"Standings of F1 {year}", xlabel="Drivers", ylabel="Points")
    fig.set_xticklabels(labels=fig.get_xticklabels(), rotation=90)
    axis += 1
    max = plot.describe() * 1.1
    _, max_y_f = fig.get_ylim()
    max_y = max_y_f if max_y_f > max_y else max_y
    fig.set(ylim=(0, max_y))
view.show()

# Creating the charts (teams' standings per season)
sns.set_theme(style="whitegrid")
axis = 0
max_y = 0
columns = 2
rows = int(len(years) / columns)
view, axes = plt.subplots(rows, columns, figsize=(30, 300), sharex=False)
plt.subplots_adjust(hspace=0.4)
axes = axes.flatten()
for year in years:
    plot = df_teams_points[df_teams_points["year"] == year]
    plot = plot[plot["points"] > 0].sort_values(by=["points"], ascending=False, axis=0)
    fig = sns.barplot(
        x=plot["name"], y=plot["points"], ax=axes[axis], ci=None, palette="crest"
    )
    fig.set(title=f"Standings of F1 {year}", xlabel="Teams", ylabel="Points")
    fig.set_xticklabels(labels=fig.get_xticklabels(), rotation=90)
    axis += 1
    max = plot.describe() * 1.1
    _, max_y_f = fig.get_ylim()
    max_y = max_y_f if max_y_f > max_y else max_y
    fig.set(ylim=(0, max_y))
view.show()
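# #### Optional: all-time ranking
# The stated goal of the notebook is the all-time champion under the 2020 scoring
# system; this is a minimal sketch, not part of the original notebook, that aggregates
# the per-season driver points built above into an all-time total and shows the top 10.
all_time_drivers = (
    df_drivers_points.groupby(["forename", "surname"], as_index=False)["points"]
    .sum()
    .sort_values(by="points", ascending=False)
)
print(all_time_drivers.head(10))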
[{"formula-1-world-championship-1950-2020/races.csv": {"column_names": "[\"raceId\", \"year\", \"round\", \"circuitId\", \"name\", \"date\", \"time\", \"url\", \"fp1_date\", \"fp1_time\", \"fp2_date\", \"fp2_time\", \"fp3_date\", \"fp3_time\", \"quali_date\", \"quali_time\", \"sprint_date\", \"sprint_time\"]", "column_data_types": "{\"raceId\": \"int64\", \"year\": \"int64\", \"round\": \"int64\", \"circuitId\": \"int64\", \"name\": \"object\", \"date\": \"object\", \"time\": \"object\", \"url\": \"object\", \"fp1_date\": \"object\", \"fp1_time\": \"object\", \"fp2_date\": \"object\", \"fp2_time\": \"object\", \"fp3_date\": \"object\", \"fp3_time\": \"object\", \"quali_date\": \"object\", \"quali_time\": \"object\", \"sprint_date\": \"object\", \"sprint_time\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1101 entries, 0 to 1100\nData columns (total 18 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 raceId 1101 non-null int64 \n 1 year 1101 non-null int64 \n 2 round 1101 non-null int64 \n 3 circuitId 1101 non-null int64 \n 4 name 1101 non-null object\n 5 date 1101 non-null object\n 6 time 1101 non-null object\n 7 url 1101 non-null object\n 8 fp1_date 1101 non-null object\n 9 fp1_time 1101 non-null object\n 10 fp2_date 1101 non-null object\n 11 fp2_time 1101 non-null object\n 12 fp3_date 1101 non-null object\n 13 fp3_time 1101 non-null object\n 14 quali_date 1101 non-null object\n 15 quali_time 1101 non-null object\n 16 sprint_date 1101 non-null object\n 17 sprint_time 1101 non-null object\ndtypes: int64(4), object(14)\nmemory usage: 155.0+ KB\n", "summary": "{\"raceId\": {\"count\": 1101.0, \"mean\": 553.3551316984559, \"std\": 321.42578974357065, \"min\": 1.0, \"25%\": 276.0, \"50%\": 551.0, \"75%\": 826.0, \"max\": 1120.0}, \"year\": {\"count\": 1101.0, \"mean\": 1992.0208900999091, \"std\": 20.29640629918379, \"min\": 1950.0, \"25%\": 1976.0, \"50%\": 1994.0, \"75%\": 2010.0, \"max\": 2023.0}, \"round\": {\"count\": 1101.0, \"mean\": 8.494096276112625, \"std\": 5.081088969341006, \"min\": 1.0, \"25%\": 4.0, \"50%\": 8.0, \"75%\": 12.0, \"max\": 22.0}, \"circuitId\": {\"count\": 1101.0, \"mean\": 23.70027247956403, \"std\": 19.34601415181047, \"min\": 1.0, \"25%\": 9.0, \"50%\": 18.0, \"75%\": 34.0, \"max\": 80.0}}", "examples": "{\"raceId\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"year\":{\"0\":2009,\"1\":2009,\"2\":2009,\"3\":2009},\"round\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"circuitId\":{\"0\":1,\"1\":2,\"2\":17,\"3\":3},\"name\":{\"0\":\"Australian Grand Prix\",\"1\":\"Malaysian Grand Prix\",\"2\":\"Chinese Grand Prix\",\"3\":\"Bahrain Grand 
Prix\"},\"date\":{\"0\":\"2009-03-29\",\"1\":\"2009-04-05\",\"2\":\"2009-04-19\",\"3\":\"2009-04-26\"},\"time\":{\"0\":\"06:00:00\",\"1\":\"09:00:00\",\"2\":\"07:00:00\",\"3\":\"12:00:00\"},\"url\":{\"0\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/2009_Australian_Grand_Prix\",\"1\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/2009_Malaysian_Grand_Prix\",\"2\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/2009_Chinese_Grand_Prix\",\"3\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/2009_Bahrain_Grand_Prix\"},\"fp1_date\":{\"0\":\"\\\\N\",\"1\":\"\\\\N\",\"2\":\"\\\\N\",\"3\":\"\\\\N\"},\"fp1_time\":{\"0\":\"\\\\N\",\"1\":\"\\\\N\",\"2\":\"\\\\N\",\"3\":\"\\\\N\"},\"fp2_date\":{\"0\":\"\\\\N\",\"1\":\"\\\\N\",\"2\":\"\\\\N\",\"3\":\"\\\\N\"},\"fp2_time\":{\"0\":\"\\\\N\",\"1\":\"\\\\N\",\"2\":\"\\\\N\",\"3\":\"\\\\N\"},\"fp3_date\":{\"0\":\"\\\\N\",\"1\":\"\\\\N\",\"2\":\"\\\\N\",\"3\":\"\\\\N\"},\"fp3_time\":{\"0\":\"\\\\N\",\"1\":\"\\\\N\",\"2\":\"\\\\N\",\"3\":\"\\\\N\"},\"quali_date\":{\"0\":\"\\\\N\",\"1\":\"\\\\N\",\"2\":\"\\\\N\",\"3\":\"\\\\N\"},\"quali_time\":{\"0\":\"\\\\N\",\"1\":\"\\\\N\",\"2\":\"\\\\N\",\"3\":\"\\\\N\"},\"sprint_date\":{\"0\":\"\\\\N\",\"1\":\"\\\\N\",\"2\":\"\\\\N\",\"3\":\"\\\\N\"},\"sprint_time\":{\"0\":\"\\\\N\",\"1\":\"\\\\N\",\"2\":\"\\\\N\",\"3\":\"\\\\N\"}}"}}, {"formula-1-world-championship-1950-2020/constructors.csv": {"column_names": "[\"constructorId\", \"constructorRef\", \"name\", \"nationality\", \"url\"]", "column_data_types": "{\"constructorId\": \"int64\", \"constructorRef\": \"object\", \"name\": \"object\", \"nationality\": \"object\", \"url\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 211 entries, 0 to 210\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 constructorId 211 non-null int64 \n 1 constructorRef 211 non-null object\n 2 name 211 non-null object\n 3 nationality 211 non-null object\n 4 url 211 non-null object\ndtypes: int64(1), object(4)\nmemory usage: 8.4+ KB\n", "summary": "{\"constructorId\": {\"count\": 211.0, \"mean\": 107.03791469194313, \"std\": 61.65362912434443, \"min\": 1.0, \"25%\": 54.5, \"50%\": 107.0, \"75%\": 159.5, \"max\": 214.0}}", "examples": "{\"constructorId\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"constructorRef\":{\"0\":\"mclaren\",\"1\":\"bmw_sauber\",\"2\":\"williams\",\"3\":\"renault\"},\"name\":{\"0\":\"McLaren\",\"1\":\"BMW Sauber\",\"2\":\"Williams\",\"3\":\"Renault\"},\"nationality\":{\"0\":\"British\",\"1\":\"German\",\"2\":\"British\",\"3\":\"French\"},\"url\":{\"0\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/McLaren\",\"1\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/BMW_Sauber\",\"2\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/Williams_Grand_Prix_Engineering\",\"3\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/Renault_in_Formula_One\"}}"}}, {"formula-1-world-championship-1950-2020/results.csv": {"column_names": "[\"resultId\", \"raceId\", \"driverId\", \"constructorId\", \"number\", \"grid\", \"position\", \"positionText\", \"positionOrder\", \"points\", \"laps\", \"time\", \"milliseconds\", \"fastestLap\", \"rank\", \"fastestLapTime\", \"fastestLapSpeed\", \"statusId\"]", "column_data_types": "{\"resultId\": \"int64\", \"raceId\": \"int64\", \"driverId\": \"int64\", \"constructorId\": \"int64\", \"number\": \"object\", \"grid\": \"int64\", \"position\": \"object\", \"positionText\": \"object\", \"positionOrder\": \"int64\", \"points\": \"float64\", \"laps\": \"int64\", \"time\": \"object\", \"milliseconds\": \"object\", \"fastestLap\": \"object\", \"rank\": 
\"object\", \"fastestLapTime\": \"object\", \"fastestLapSpeed\": \"object\", \"statusId\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 26080 entries, 0 to 26079\nData columns (total 18 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 resultId 26080 non-null int64 \n 1 raceId 26080 non-null int64 \n 2 driverId 26080 non-null int64 \n 3 constructorId 26080 non-null int64 \n 4 number 26080 non-null object \n 5 grid 26080 non-null int64 \n 6 position 26080 non-null object \n 7 positionText 26080 non-null object \n 8 positionOrder 26080 non-null int64 \n 9 points 26080 non-null float64\n 10 laps 26080 non-null int64 \n 11 time 26080 non-null object \n 12 milliseconds 26080 non-null object \n 13 fastestLap 26080 non-null object \n 14 rank 26080 non-null object \n 15 fastestLapTime 26080 non-null object \n 16 fastestLapSpeed 26080 non-null object \n 17 statusId 26080 non-null int64 \ndtypes: float64(1), int64(8), object(9)\nmemory usage: 3.6+ MB\n", "summary": "{\"resultId\": {\"count\": 26080.0, \"mean\": 13041.372661042944, \"std\": 7530.008377404087, \"min\": 1.0, \"25%\": 6520.75, \"50%\": 13040.5, \"75%\": 19560.25, \"max\": 26085.0}, \"raceId\": {\"count\": 26080.0, \"mean\": 536.6956671779141, \"std\": 303.0346394319806, \"min\": 1.0, \"25%\": 294.75, \"50%\": 519.0, \"75%\": 791.0, \"max\": 1110.0}, \"driverId\": {\"count\": 26080.0, \"mean\": 266.2775690184049, \"std\": 272.5816217334541, \"min\": 1.0, \"25%\": 57.0, \"50%\": 163.0, \"75%\": 364.0, \"max\": 858.0}, \"constructorId\": {\"count\": 26080.0, \"mean\": 49.05966257668712, \"std\": 60.221056147762184, \"min\": 1.0, \"25%\": 6.0, \"50%\": 25.0, \"75%\": 58.25, \"max\": 214.0}, \"grid\": {\"count\": 26080.0, \"mean\": 11.167561349693251, \"std\": 7.232796571993715, \"min\": 0.0, \"25%\": 5.0, \"50%\": 11.0, \"75%\": 17.0, \"max\": 34.0}, \"positionOrder\": {\"count\": 26080.0, \"mean\": 12.854141104294479, \"std\": 7.700067832456976, \"min\": 1.0, \"25%\": 6.0, \"50%\": 12.0, \"75%\": 18.0, \"max\": 39.0}, \"points\": {\"count\": 26080.0, \"mean\": 1.9066353527607363, \"std\": 4.219714734476984, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 2.0, \"max\": 50.0}, \"laps\": {\"count\": 26080.0, \"mean\": 46.076687116564415, \"std\": 29.726057711471235, \"min\": 0.0, \"25%\": 22.0, \"50%\": 53.0, \"75%\": 66.0, \"max\": 200.0}, \"statusId\": {\"count\": 26080.0, \"mean\": 17.4760736196319, \"std\": 26.129964996995792, \"min\": 1.0, \"25%\": 1.0, \"50%\": 10.0, \"75%\": 14.0, \"max\": 141.0}}", "examples": 
"{\"resultId\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"raceId\":{\"0\":18,\"1\":18,\"2\":18,\"3\":18},\"driverId\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"constructorId\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"number\":{\"0\":\"22\",\"1\":\"3\",\"2\":\"7\",\"3\":\"5\"},\"grid\":{\"0\":1,\"1\":5,\"2\":7,\"3\":11},\"position\":{\"0\":\"1\",\"1\":\"2\",\"2\":\"3\",\"3\":\"4\"},\"positionText\":{\"0\":\"1\",\"1\":\"2\",\"2\":\"3\",\"3\":\"4\"},\"positionOrder\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"points\":{\"0\":10.0,\"1\":8.0,\"2\":6.0,\"3\":5.0},\"laps\":{\"0\":58,\"1\":58,\"2\":58,\"3\":58},\"time\":{\"0\":\"1:34:50.616\",\"1\":\"+5.478\",\"2\":\"+8.163\",\"3\":\"+17.181\"},\"milliseconds\":{\"0\":\"5690616\",\"1\":\"5696094\",\"2\":\"5698779\",\"3\":\"5707797\"},\"fastestLap\":{\"0\":\"39\",\"1\":\"41\",\"2\":\"41\",\"3\":\"58\"},\"rank\":{\"0\":\"2\",\"1\":\"3\",\"2\":\"5\",\"3\":\"7\"},\"fastestLapTime\":{\"0\":\"1:27.452\",\"1\":\"1:27.739\",\"2\":\"1:28.090\",\"3\":\"1:28.603\"},\"fastestLapSpeed\":{\"0\":\"218.300\",\"1\":\"217.586\",\"2\":\"216.719\",\"3\":\"215.464\"},\"statusId\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1}}"}}, {"formula-1-world-championship-1950-2020/drivers.csv": {"column_names": "[\"driverId\", \"driverRef\", \"number\", \"code\", \"forename\", \"surname\", \"dob\", \"nationality\", \"url\"]", "column_data_types": "{\"driverId\": \"int64\", \"driverRef\": \"object\", \"number\": \"object\", \"code\": \"object\", \"forename\": \"object\", \"surname\": \"object\", \"dob\": \"object\", \"nationality\": \"object\", \"url\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 857 entries, 0 to 856\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 driverId 857 non-null int64 \n 1 driverRef 857 non-null object\n 2 number 857 non-null object\n 3 code 857 non-null object\n 4 forename 857 non-null object\n 5 surname 857 non-null object\n 6 dob 857 non-null object\n 7 nationality 857 non-null object\n 8 url 857 non-null object\ndtypes: int64(1), object(8)\nmemory usage: 60.4+ KB\n", "summary": "{\"driverId\": {\"count\": 857.0, \"mean\": 429.0571761960327, \"std\": 247.63240156833837, \"min\": 1.0, \"25%\": 215.0, \"50%\": 429.0, \"75%\": 643.0, \"max\": 858.0}}", "examples": "{\"driverId\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"driverRef\":{\"0\":\"hamilton\",\"1\":\"heidfeld\",\"2\":\"rosberg\",\"3\":\"alonso\"},\"number\":{\"0\":\"44\",\"1\":\"\\\\N\",\"2\":\"6\",\"3\":\"14\"},\"code\":{\"0\":\"HAM\",\"1\":\"HEI\",\"2\":\"ROS\",\"3\":\"ALO\"},\"forename\":{\"0\":\"Lewis\",\"1\":\"Nick\",\"2\":\"Nico\",\"3\":\"Fernando\"},\"surname\":{\"0\":\"Hamilton\",\"1\":\"Heidfeld\",\"2\":\"Rosberg\",\"3\":\"Alonso\"},\"dob\":{\"0\":\"1985-01-07\",\"1\":\"1977-05-10\",\"2\":\"1985-06-27\",\"3\":\"1981-07-29\"},\"nationality\":{\"0\":\"British\",\"1\":\"German\",\"2\":\"German\",\"3\":\"Spanish\"},\"url\":{\"0\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/Lewis_Hamilton\",\"1\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/Nick_Heidfeld\",\"2\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/Nico_Rosberg\",\"3\":\"http:\\/\\/en.wikipedia.org\\/wiki\\/Fernando_Alonso\"}}"}}]
true
4
<start_data_description><data_path>formula-1-world-championship-1950-2020/races.csv: <column_names> ['raceId', 'year', 'round', 'circuitId', 'name', 'date', 'time', 'url', 'fp1_date', 'fp1_time', 'fp2_date', 'fp2_time', 'fp3_date', 'fp3_time', 'quali_date', 'quali_time', 'sprint_date', 'sprint_time'] <column_types> {'raceId': 'int64', 'year': 'int64', 'round': 'int64', 'circuitId': 'int64', 'name': 'object', 'date': 'object', 'time': 'object', 'url': 'object', 'fp1_date': 'object', 'fp1_time': 'object', 'fp2_date': 'object', 'fp2_time': 'object', 'fp3_date': 'object', 'fp3_time': 'object', 'quali_date': 'object', 'quali_time': 'object', 'sprint_date': 'object', 'sprint_time': 'object'} <dataframe_Summary> {'raceId': {'count': 1101.0, 'mean': 553.3551316984559, 'std': 321.42578974357065, 'min': 1.0, '25%': 276.0, '50%': 551.0, '75%': 826.0, 'max': 1120.0}, 'year': {'count': 1101.0, 'mean': 1992.0208900999091, 'std': 20.29640629918379, 'min': 1950.0, '25%': 1976.0, '50%': 1994.0, '75%': 2010.0, 'max': 2023.0}, 'round': {'count': 1101.0, 'mean': 8.494096276112625, 'std': 5.081088969341006, 'min': 1.0, '25%': 4.0, '50%': 8.0, '75%': 12.0, 'max': 22.0}, 'circuitId': {'count': 1101.0, 'mean': 23.70027247956403, 'std': 19.34601415181047, 'min': 1.0, '25%': 9.0, '50%': 18.0, '75%': 34.0, 'max': 80.0}} <dataframe_info> RangeIndex: 1101 entries, 0 to 1100 Data columns (total 18 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 raceId 1101 non-null int64 1 year 1101 non-null int64 2 round 1101 non-null int64 3 circuitId 1101 non-null int64 4 name 1101 non-null object 5 date 1101 non-null object 6 time 1101 non-null object 7 url 1101 non-null object 8 fp1_date 1101 non-null object 9 fp1_time 1101 non-null object 10 fp2_date 1101 non-null object 11 fp2_time 1101 non-null object 12 fp3_date 1101 non-null object 13 fp3_time 1101 non-null object 14 quali_date 1101 non-null object 15 quali_time 1101 non-null object 16 sprint_date 1101 non-null object 17 sprint_time 1101 non-null object dtypes: int64(4), object(14) memory usage: 155.0+ KB <some_examples> {'raceId': {'0': 1, '1': 2, '2': 3, '3': 4}, 'year': {'0': 2009, '1': 2009, '2': 2009, '3': 2009}, 'round': {'0': 1, '1': 2, '2': 3, '3': 4}, 'circuitId': {'0': 1, '1': 2, '2': 17, '3': 3}, 'name': {'0': 'Australian Grand Prix', '1': 'Malaysian Grand Prix', '2': 'Chinese Grand Prix', '3': 'Bahrain Grand Prix'}, 'date': {'0': '2009-03-29', '1': '2009-04-05', '2': '2009-04-19', '3': '2009-04-26'}, 'time': {'0': '06:00:00', '1': '09:00:00', '2': '07:00:00', '3': '12:00:00'}, 'url': {'0': 'http://en.wikipedia.org/wiki/2009_Australian_Grand_Prix', '1': 'http://en.wikipedia.org/wiki/2009_Malaysian_Grand_Prix', '2': 'http://en.wikipedia.org/wiki/2009_Chinese_Grand_Prix', '3': 'http://en.wikipedia.org/wiki/2009_Bahrain_Grand_Prix'}, 'fp1_date': {'0': '\\N', '1': '\\N', '2': '\\N', '3': '\\N'}, 'fp1_time': {'0': '\\N', '1': '\\N', '2': '\\N', '3': '\\N'}, 'fp2_date': {'0': '\\N', '1': '\\N', '2': '\\N', '3': '\\N'}, 'fp2_time': {'0': '\\N', '1': '\\N', '2': '\\N', '3': '\\N'}, 'fp3_date': {'0': '\\N', '1': '\\N', '2': '\\N', '3': '\\N'}, 'fp3_time': {'0': '\\N', '1': '\\N', '2': '\\N', '3': '\\N'}, 'quali_date': {'0': '\\N', '1': '\\N', '2': '\\N', '3': '\\N'}, 'quali_time': {'0': '\\N', '1': '\\N', '2': '\\N', '3': '\\N'}, 'sprint_date': {'0': '\\N', '1': '\\N', '2': '\\N', '3': '\\N'}, 'sprint_time': {'0': '\\N', '1': '\\N', '2': '\\N', '3': '\\N'}} <end_description> 
<start_data_description><data_path>formula-1-world-championship-1950-2020/constructors.csv: <column_names> ['constructorId', 'constructorRef', 'name', 'nationality', 'url'] <column_types> {'constructorId': 'int64', 'constructorRef': 'object', 'name': 'object', 'nationality': 'object', 'url': 'object'} <dataframe_Summary> {'constructorId': {'count': 211.0, 'mean': 107.03791469194313, 'std': 61.65362912434443, 'min': 1.0, '25%': 54.5, '50%': 107.0, '75%': 159.5, 'max': 214.0}} <dataframe_info> RangeIndex: 211 entries, 0 to 210 Data columns (total 5 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 constructorId 211 non-null int64 1 constructorRef 211 non-null object 2 name 211 non-null object 3 nationality 211 non-null object 4 url 211 non-null object dtypes: int64(1), object(4) memory usage: 8.4+ KB <some_examples> {'constructorId': {'0': 1, '1': 2, '2': 3, '3': 4}, 'constructorRef': {'0': 'mclaren', '1': 'bmw_sauber', '2': 'williams', '3': 'renault'}, 'name': {'0': 'McLaren', '1': 'BMW Sauber', '2': 'Williams', '3': 'Renault'}, 'nationality': {'0': 'British', '1': 'German', '2': 'British', '3': 'French'}, 'url': {'0': 'http://en.wikipedia.org/wiki/McLaren', '1': 'http://en.wikipedia.org/wiki/BMW_Sauber', '2': 'http://en.wikipedia.org/wiki/Williams_Grand_Prix_Engineering', '3': 'http://en.wikipedia.org/wiki/Renault_in_Formula_One'}} <end_description> <start_data_description><data_path>formula-1-world-championship-1950-2020/results.csv: <column_names> ['resultId', 'raceId', 'driverId', 'constructorId', 'number', 'grid', 'position', 'positionText', 'positionOrder', 'points', 'laps', 'time', 'milliseconds', 'fastestLap', 'rank', 'fastestLapTime', 'fastestLapSpeed', 'statusId'] <column_types> {'resultId': 'int64', 'raceId': 'int64', 'driverId': 'int64', 'constructorId': 'int64', 'number': 'object', 'grid': 'int64', 'position': 'object', 'positionText': 'object', 'positionOrder': 'int64', 'points': 'float64', 'laps': 'int64', 'time': 'object', 'milliseconds': 'object', 'fastestLap': 'object', 'rank': 'object', 'fastestLapTime': 'object', 'fastestLapSpeed': 'object', 'statusId': 'int64'} <dataframe_Summary> {'resultId': {'count': 26080.0, 'mean': 13041.372661042944, 'std': 7530.008377404087, 'min': 1.0, '25%': 6520.75, '50%': 13040.5, '75%': 19560.25, 'max': 26085.0}, 'raceId': {'count': 26080.0, 'mean': 536.6956671779141, 'std': 303.0346394319806, 'min': 1.0, '25%': 294.75, '50%': 519.0, '75%': 791.0, 'max': 1110.0}, 'driverId': {'count': 26080.0, 'mean': 266.2775690184049, 'std': 272.5816217334541, 'min': 1.0, '25%': 57.0, '50%': 163.0, '75%': 364.0, 'max': 858.0}, 'constructorId': {'count': 26080.0, 'mean': 49.05966257668712, 'std': 60.221056147762184, 'min': 1.0, '25%': 6.0, '50%': 25.0, '75%': 58.25, 'max': 214.0}, 'grid': {'count': 26080.0, 'mean': 11.167561349693251, 'std': 7.232796571993715, 'min': 0.0, '25%': 5.0, '50%': 11.0, '75%': 17.0, 'max': 34.0}, 'positionOrder': {'count': 26080.0, 'mean': 12.854141104294479, 'std': 7.700067832456976, 'min': 1.0, '25%': 6.0, '50%': 12.0, '75%': 18.0, 'max': 39.0}, 'points': {'count': 26080.0, 'mean': 1.9066353527607363, 'std': 4.219714734476984, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 2.0, 'max': 50.0}, 'laps': {'count': 26080.0, 'mean': 46.076687116564415, 'std': 29.726057711471235, 'min': 0.0, '25%': 22.0, '50%': 53.0, '75%': 66.0, 'max': 200.0}, 'statusId': {'count': 26080.0, 'mean': 17.4760736196319, 'std': 26.129964996995792, 'min': 1.0, '25%': 1.0, '50%': 10.0, '75%': 14.0, 'max': 141.0}} <dataframe_info> 
RangeIndex: 26080 entries, 0 to 26079 Data columns (total 18 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 resultId 26080 non-null int64 1 raceId 26080 non-null int64 2 driverId 26080 non-null int64 3 constructorId 26080 non-null int64 4 number 26080 non-null object 5 grid 26080 non-null int64 6 position 26080 non-null object 7 positionText 26080 non-null object 8 positionOrder 26080 non-null int64 9 points 26080 non-null float64 10 laps 26080 non-null int64 11 time 26080 non-null object 12 milliseconds 26080 non-null object 13 fastestLap 26080 non-null object 14 rank 26080 non-null object 15 fastestLapTime 26080 non-null object 16 fastestLapSpeed 26080 non-null object 17 statusId 26080 non-null int64 dtypes: float64(1), int64(8), object(9) memory usage: 3.6+ MB <some_examples> {'resultId': {'0': 1, '1': 2, '2': 3, '3': 4}, 'raceId': {'0': 18, '1': 18, '2': 18, '3': 18}, 'driverId': {'0': 1, '1': 2, '2': 3, '3': 4}, 'constructorId': {'0': 1, '1': 2, '2': 3, '3': 4}, 'number': {'0': '22', '1': '3', '2': '7', '3': '5'}, 'grid': {'0': 1, '1': 5, '2': 7, '3': 11}, 'position': {'0': '1', '1': '2', '2': '3', '3': '4'}, 'positionText': {'0': '1', '1': '2', '2': '3', '3': '4'}, 'positionOrder': {'0': 1, '1': 2, '2': 3, '3': 4}, 'points': {'0': 10.0, '1': 8.0, '2': 6.0, '3': 5.0}, 'laps': {'0': 58, '1': 58, '2': 58, '3': 58}, 'time': {'0': '1:34:50.616', '1': '+5.478', '2': '+8.163', '3': '+17.181'}, 'milliseconds': {'0': '5690616', '1': '5696094', '2': '5698779', '3': '5707797'}, 'fastestLap': {'0': '39', '1': '41', '2': '41', '3': '58'}, 'rank': {'0': '2', '1': '3', '2': '5', '3': '7'}, 'fastestLapTime': {'0': '1:27.452', '1': '1:27.739', '2': '1:28.090', '3': '1:28.603'}, 'fastestLapSpeed': {'0': '218.300', '1': '217.586', '2': '216.719', '3': '215.464'}, 'statusId': {'0': 1, '1': 1, '2': 1, '3': 1}} <end_description> <start_data_description><data_path>formula-1-world-championship-1950-2020/drivers.csv: <column_names> ['driverId', 'driverRef', 'number', 'code', 'forename', 'surname', 'dob', 'nationality', 'url'] <column_types> {'driverId': 'int64', 'driverRef': 'object', 'number': 'object', 'code': 'object', 'forename': 'object', 'surname': 'object', 'dob': 'object', 'nationality': 'object', 'url': 'object'} <dataframe_Summary> {'driverId': {'count': 857.0, 'mean': 429.0571761960327, 'std': 247.63240156833837, 'min': 1.0, '25%': 215.0, '50%': 429.0, '75%': 643.0, 'max': 858.0}} <dataframe_info> RangeIndex: 857 entries, 0 to 856 Data columns (total 9 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 driverId 857 non-null int64 1 driverRef 857 non-null object 2 number 857 non-null object 3 code 857 non-null object 4 forename 857 non-null object 5 surname 857 non-null object 6 dob 857 non-null object 7 nationality 857 non-null object 8 url 857 non-null object dtypes: int64(1), object(8) memory usage: 60.4+ KB <some_examples> {'driverId': {'0': 1, '1': 2, '2': 3, '3': 4}, 'driverRef': {'0': 'hamilton', '1': 'heidfeld', '2': 'rosberg', '3': 'alonso'}, 'number': {'0': '44', '1': '\\N', '2': '6', '3': '14'}, 'code': {'0': 'HAM', '1': 'HEI', '2': 'ROS', '3': 'ALO'}, 'forename': {'0': 'Lewis', '1': 'Nick', '2': 'Nico', '3': 'Fernando'}, 'surname': {'0': 'Hamilton', '1': 'Heidfeld', '2': 'Rosberg', '3': 'Alonso'}, 'dob': {'0': '1985-01-07', '1': '1977-05-10', '2': '1985-06-27', '3': '1981-07-29'}, 'nationality': {'0': 'British', '1': 'German', '2': 'German', '3': 'Spanish'}, 'url': {'0': 'http://en.wikipedia.org/wiki/Lewis_Hamilton', '1': 
'http://en.wikipedia.org/wiki/Nick_Heidfeld', '2': 'http://en.wikipedia.org/wiki/Nico_Rosberg', '3': 'http://en.wikipedia.org/wiki/Fernando_Alonso'}} <end_description>
3,023
0
6,857
3,023
69433894
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # A Fancy Dinner Out # ****You and 3 friends, Tom, Jane and Natasha have gone out to a Fancy 5-star restaurant to celebrate your birthday. The Menu of this place is very unique. All items are placed in an assorted dictionary, which you will have to access to order everyone's desired meals. Each meal consists of a Main and Side dish.**** # # * Tom: Orange Chicken with Caesar Salad # * Jane: Pasta Alfredo with Chicken Nuggets # * Natasha: Pepperoni Pizza with French Fries # **The dictionary has all words in lower case and each item's key is the first word of the dish and the second word is the value. For example, Pasta Alfredo's Key would be Pasta and the value would be Alfredo.** main_Dict = { "pasta": "alfredo", "pork": "tenderloin", "pepperoni": "pizza", "cheese": "quesadilla", "orange": "chicken", } side_Dict = { "tater": "tots", "animal": "fries", "creamcheese": "bagel", "french": "fries", "caesar": "salad", "chicken": "nuggets", } # Access all the required food dishes for your friends # Print out the food ordered in the same format as the bullet points # **All of you have finished your dishes and now it's time to pay for the dishes. And of course, since it is your birthday, the dinner's on you! Access the costs of each dish from the cost dictionaries, main_cost_Dict and side_cost_Dict, and then compute the subtotal for the dinner. HINT: the costs are saved as strings so you "might" have to cast them!** main_cost_Dict = { "pork_tenderloin": "19.99", "pepperoni_pizza": "15.99", "pasta_alfredo": "11.99", "cheese_quesadilla": "8.49", "orange_chicken": "13.49", } side_cost_Dict = { "animal_fries": "4.99", "creamcheese_bagel": "3.49", "caesar_salad": "5.99", "tater_tots": "2.99", "french_fries": "3.99", "chicken_nuggets": "4.49", } # Access the prices of the ordered dishes # **Finally, before you pay the bill, you are in luck! The restaurant does offer some discounts if you spend a certain amount of money at the place! The restaurant offers a 10% discount for thirty dollars spent, 25% discount for forty-five dollars spent, and 40% discount for seventy dollars spent! Calculate which, if any, discount you are eligible for and include that in your subtotal. At the very end, add a 5% tax and print out the final total** # initialize the subtotal as a variable here based on the answer from the previous part (Manually round to the nearest cent) # Check for any discount eligibility based on the subtotal and deduct it from the subtotal if eligible # Add the taxes and print the total
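# The lookup and billing steps above are left as TODOs; one possible solution sketch follows.
# Note: the `orders` mapping, the ">=" reading of the discount thresholds, and the rounding
# choices below are my own assumptions — they are not part of the original prompt.
orders = {
    "Tom": ("orange", "caesar"),
    "Jane": ("pasta", "chicken"),
    "Natasha": ("pepperoni", "french"),
}

subtotal = 0.0
for friend, (main_key, side_key) in orders.items():
    main = f"{main_key} {main_Dict[main_key]}".title()
    side = f"{side_key} {side_Dict[side_key]}".title()
    print(f"* {friend}: {main} with {side}")
    # the prices are stored as strings, so cast them to float before summing
    subtotal += float(main_cost_Dict[f"{main_key}_{main_Dict[main_key]}"])
    subtotal += float(side_cost_Dict[f"{side_key}_{side_Dict[side_key]}"])

subtotal = round(subtotal, 2)

# apply the largest discount tier the subtotal qualifies for
if subtotal >= 70:
    subtotal *= 0.60
elif subtotal >= 45:
    subtotal *= 0.75
elif subtotal >= 30:
    subtotal *= 0.90

total = round(subtotal * 1.05, 2)  # add the 5% tax
print(f"Total: ${total:.2f}")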
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/433/69433894.ipynb
null
null
[{"Id": 69433894, "ScriptId": 18959007, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7756057, "CreationDate": "07/31/2021 01:19:44", "VersionNumber": 1.0, "Title": "Week 1 Lesson 1 Homework", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 51.0, "LinesInsertedFromPrevious": 51.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # # A Fancy Dinner Out # ****You and 3 friends, Tom, Jane and Natasha have gone out to a Fancy 5-star restaurant to celebrate your birthday. The Menu of this place is very unique. All items are placed in an assorted dictionary, which you will have to access to order everyone's desired meals. Each meal consists of a Main and Side dish.**** # # * Tom: Orange Chicken with Caesar Salad # * Jane: Pasta Alfredo with Chicken Nuggets # * Natasha: Pepperoni Pizza with French Fries # **The dictionary has all words in lower case and each item's key is the first word of the dish and the second word is the value. For example, Pasta Alfredo's Key would be Pasta and the value would be Alfredo.** main_Dict = { "pasta": "alfredo", "pork": "tenderloin", "pepperoni": "pizza", "cheese": "quesadilla", "orange": "chicken", } side_Dict = { "tater": "tots", "animal": "fries", "creamcheese": "bagel", "french": "fries", "caesar": "salad", "chicken": "nuggets", } # Access all the required food dishes for your friends # Print out the food ordered in the same format as the bullet points # **All of you have finished your dishes and now it's time to pay for the dishes. And of course, since it is your birthday, the dinner's on you! Access the costs of each dish from the cost dictionaries, main_cost_Dict and side_cost_Dict, and then compute the subtotal for the dinner. HINT: the costs are saved as strings so you "might" have to cast them!** main_cost_Dict = { "pork_tenderloin": "19.99", "pepperoni_pizza": "15.99", "pasta_alfredo": "11.99", "cheese_quesadilla": "8.49", "orange_chicken": "13.49", } side_cost_Dict = { "animal_fries": "4.99", "creamcheese_bagel": "3.49", "caesar_salad": "5.99", "tater_tots": "2.99", "french_fries": "3.99", "chicken_nuggets": "4.49", } # Access the prices of the ordered dishes # **Finally, before you pay the bill, you are in luck! The restaurant does offer some discounts if you spend a certain amount of money at the place! The restaurant offers a 10% discount for thirty dollars spent, 25% discount for forty-five dollars spent, and 40% discount for seventy dollars spent! Calculate which, if any, discount you are eligible for and include that in your subtotal. At the very end, add a 5% tax and print out the final total** # initialize the subtotal as a variable here based on the answer from the previous part (Manually round to the nearest cent) # Check for any discount eligibility based on the subtotal and deduct it from the subtotal if eligible # Add the taxes and print the total
false
0
991
0
991
991
69433540
<jupyter_start><jupyter_text>Pakistan Engineer's Data ### Context I have used PEC registered Engineers' data up till 2019, which is publicly available on the official [website](https://www.pec.org.pk/). I wanted to use Plotly and in build, function to assist me in interactive and clean visualization for easy interpretations. I have also added multiple data from different sources and all of them are publicly available. Overall, we will be converting PDF to CSV, cleaning the data, features engineering, analyzing using Plotly visualization and conclusion. [![Deepnote](https://img.shields.io/badge/Deepnote-Engineering_Data_Analysis-0053a0?logo=deepnote&style=for-the-badge)](https://deepnote.com/@abid/Engineering-Data-Analysis-fn8aALtMQwaElv_AUuwssg) ### Content PITC_Engineering.csv file contains all the necessary documents about engineers working in firms all around the country and their employment duration. I have also included Longitude and latitude for geospatial analysis and added more demographical information so that users can focus on analysis rather than cleaning the data. Kaggle dataset identifier: pakistan-engineers-data <jupyter_code>import pandas as pd df = pd.read_csv('pakistan-engineers-data/PITC_Engineering.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 12861 entries, 0 to 12860 Data columns (total 16 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 12861 non-null int64 1 ID 12556 non-null float64 2 Engineer_Name 12556 non-null object 3 Father_Name 12556 non-null object 4 Company_Name 12556 non-null object 5 Employment_Start_Date 12556 non-null object 6 Employment_End_Date 12556 non-null object 7 Employment_period 12556 non-null object 8 Engineering_Degree 12556 non-null object 9 PEC_No 12556 non-null float64 10 Firm_Category 12556 non-null object 11 Firms_City 12556 non-null object 12 License_no 12556 non-null float64 13 Latitude 10253 non-null float64 14 Longitude 10253 non-null float64 15 Province 10253 non-null object dtypes: float64(5), int64(1), object(10) memory usage: 1.6+ MB <jupyter_text>Examples: { "Unnamed: 0": 0, "ID": 1, "Engineer_Name": "ENGR. ASADULLAH FAROOQ", "Father_Name": "CH. MOHAMMAD FAROOQ", "Company_Name": "NADEEM CONSTRUCTION COMPANY", "Employment_Start_Date": "2019-07-15", "Employment_End_Date": "2020-06-30", "Employment_period": "351 days", "Engineering_Degree": "AERO", "PEC_No": 720, "Firm_Category": "C2", "Firms_City": "bahawalpur", "License_no": 967, "Latitude": 29.3956, "Longitude": 71.6722, "Province": "Punjab" } { "Unnamed: 0": 1, "ID": 2, "Engineer_Name": "ENGR. USAMA HAMID", "Father_Name": "ABDUL HAMID", "Company_Name": "SHABBIR CONTRACTOR", "Employment_Start_Date": "2019-07-23", "Employment_End_Date": "2020-06-30", "Employment_period": "343 days", "Engineering_Degree": "AERO", "PEC_No": 1866, "Firm_Category": "C4", "Firms_City": "muzaffarabad", "License_no": 12341, "Latitude": 34.37, "Longitude": 73.4711, "Province": "Azad Kashmir" } { "Unnamed: 0": 2, "ID": 3, "Engineer_Name": "ENGR. SHAHID KHURSHID", "Father_Name": "MUHAMMAD KHURSHID", "Company_Name": "IMAN GROUP", "Employment_Start_Date": "2019-07-19", "Employment_End_Date": "2020-06-30", "Employment_period": "347 days", "Engineering_Degree": "AERO", "PEC_No": 2211, "Firm_Category": "C4", "Firms_City": "islamabad", "License_no": 9590, "Latitude": 33.6989, "Longitude": 73.0369, "Province": "Isl\u0101m\u0101b\u0101d" } { "Unnamed: 0": 3, "ID": 4, "Engineer_Name": "ENGR. 
BABAR SHAFIQUE", "Father_Name": "MUHAMMAD SHAFIQUE", "Company_Name": "NOOR BUILDERS & CO", "Employment_Start_Date": "2019-09-24", "Employment_End_Date": "2020-06-30", "Employment_period": "280 days", "Engineering_Degree": "AERO", "PEC_No": 2561, "Firm_Category": "C6", "Firms_City": "mirpur khas", "License_no": 71292, "Latitude": 25.5269, "Longitude": 69.0111, "Province": "Sindh" } <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/pakistan-engineers-data/PITC_Engineering.csv") df.head() # # 0. Data Cleaning df = df.drop("Unnamed: 0", axis=1) df.describe() df.isnull().sum() df = df.dropna(how="any") df.dtypes from datetime import datetime as dt df["Employment_Start_Date"] = pd.to_datetime(df["Employment_Start_Date"]) df["Employment_End_Date"] = pd.to_datetime(df["Employment_End_Date"]) df["Employment_Start_Date"] = df["Employment_Start_Date"].dt.strftime("%Y-%m-%d") df["Employment_End_Date"] = df["Employment_End_Date"].dt.strftime("%Y-%m-%d") df["Employment_period"] = df["Employment_period"].str.replace(" days", "") df["Employment_period"] = df["Employment_period"].astype(int) df = df[df.Employment_period > 0] df.describe() import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go import seaborn as sns # # 1. Cities where firms are located plt.figure(figsize=(20, 10)) df.groupby("Firms_City")["ID"].count().sort_values(ascending=False).plot.bar() # # 2. Employment Duration # # 2-1. Years from decimal import Decimal, ROUND_HALF_UP, ROUND_HALF_EVEN df["period_y"] = df["Employment_period"] / 365 def period(ex): if ex < 1: return 0 elif 1 <= ex < 2: return 1 elif 2 <= ex < 3: return 2 elif 3 <= ex < 4: return 3 elif 4 <= ex < 5: return 4 else: return 5 df["period"] = df["period_y"].apply(period) plt.figure(figsize=(10, 10)) sns.countplot(y="period", data=df) # # 2-2. Days plt.figure(figsize=(10, 30)) sns.countplot(y="Employment_period", data=df) # # 2-3. Engineering Degree and Emplyment Period df.groupby("Engineering_Degree")["Employment_period"].mean().plot.bar() # # 2-4. Firm Category and Emplyment Period df.groupby("Firm_Category")["Employment_period"].mean().plot.bar() # # 3. Engineering Degrees # # 3-1. Engineering Degrees and Firms City df_dc = ( df[["Firms_City", "Engineering_Degree"]] .value_counts() .rename_axis(["Firms_City", "Engineering_Degree"]) .reset_index(name="counts")[:100] ) plt.figure(figsize=(12, 12)) plt.legend(fontsize=10) plt.tick_params(labelsize=10) ax = sns.scatterplot( x="Firms_City", y="counts", hue="Engineering_Degree", size="counts", data=df_dc, sizes=(50, 500), ) plt.xticks(rotation=90) ax.legend(loc="upper left", bbox_to_anchor=(1, 1)) # # 3-2. 
Engineering Degrees and Firms Category df_dc = ( df[["Firm_Category", "Engineering_Degree"]] .value_counts() .rename_axis(["Firm_Category", "Engineering_Degree"]) .reset_index(name="counts")[:100] ) plt.figure(figsize=(12, 12)) plt.legend(fontsize=10) plt.tick_params(labelsize=10) ax = sns.scatterplot( x="Firm_Category", y="counts", hue="Engineering_Degree", size="counts", data=df_dc, sizes=(50, 500), ) plt.xticks(rotation=90) ax.legend(loc="upper left", bbox_to_anchor=(1, 1)) # # 4. Firms Cities and Firms Category df_CC = ( df[["Firms_City", "Firm_Category"]] .value_counts() .rename_axis(["Firms_City", "Firm_Category"]) .reset_index(name="counts")[:100] ) fig = px.bar(df_CC, x="Firms_City", y="counts", color="Firm_Category") fig.show() # # 5. Engineering Degree and Firms Category df_DC = ( df[["Engineering_Degree", "Firm_Category"]] .value_counts() .rename_axis(["Engineering_Degree", "Firm_Category"]) .reset_index(name="counts")[:100] ) fig = px.bar(df_DC, x="Engineering_Degree", y="counts", color="Firm_Category") fig.show() # # 6. Longitude and Latitude # # 6-1. Longitude, Latitude and Province plt.figure(figsize=(30, 30)) sns.lmplot(x="Longitude", y="Latitude", hue="Province", fit_reg=False, data=df) # # 6-2. Longitude, Latitude and Engineering Degree plt.figure(figsize=(30, 30)) sns.lmplot( x="Longitude", y="Latitude", hue="Engineering_Degree", fit_reg=False, data=df ) # # 6-3. Longitude, Latitude and Firm Category plt.figure(figsize=(30, 30)) sns.lmplot(x="Longitude", y="Latitude", hue="Firm_Category", fit_reg=False, data=df)
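# The dataset description above notes that Latitude/Longitude were added for geospatial analysis and that
# Plotly was the intended visualization library, while section 6 plots the raw coordinate pairs as scatter
# plots. As a hedged extension (not part of the original notebook), the same points could be drawn on an
# interactive map with plotly.express; column names follow the cleaned `df` used above.
fig = px.scatter_geo(
    df,
    lat="Latitude",
    lon="Longitude",
    color="Province",
    hover_name="Firms_City",
    hover_data=["Firm_Category", "Engineering_Degree"],
    title="Registered engineers' firms by location",
)
fig.update_geos(fitbounds="locations")  # zoom the base map to the plotted points
fig.show()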
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/433/69433540.ipynb
pakistan-engineers-data
kingabzpro
[{"Id": 69433540, "ScriptId": 18889397, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6437951, "CreationDate": "07/31/2021 01:07:51", "VersionNumber": 1.0, "Title": "Engineers Analysis: Visualization", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 150.0, "LinesInsertedFromPrevious": 150.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
[{"Id": 92595305, "KernelVersionId": 69433540, "SourceDatasetVersionId": 2462462}]
[{"Id": 2462462, "DatasetId": 1490531, "DatasourceVersionId": 2504889, "CreatorUserId": 5769741, "LicenseName": "GPL 2", "CreationDate": "07/25/2021 19:03:02", "VersionNumber": 1.0, "Title": "Pakistan Engineer's Data", "Slug": "pakistan-engineers-data", "Subtitle": "Employed Engineers data with location and types of firms.", "Description": "### Context\n\nI have used PEC registered Engineers' data up till 2019, which is publicly available on the official [website](https://www.pec.org.pk/). I wanted to use Plotly and in build, function to assist me in interactive and clean visualization for easy interpretations. I have also added multiple data from different sources and all of them are publicly available. Overall, we will be converting PDF to CSV, cleaning the data, features engineering, analyzing using Plotly visualization and conclusion.\n [![Deepnote](https://img.shields.io/badge/Deepnote-Engineering_Data_Analysis-0053a0?logo=deepnote&style=for-the-badge)](https://deepnote.com/@abid/Engineering-Data-Analysis-fn8aALtMQwaElv_AUuwssg)\n\n### Content\nPITC_Engineering.csv file contains all the necessary documents about engineers working in firms all around the country and their employment duration. I have also included Longitude and latitude for geospatial analysis and added more demographical information so that users can focus on analysis rather than cleaning the data. \n \n \n\n### Acknowledgements\n\nCredit goes to [Paskitan Engineering council](https://www.pec.org.pk/) for making Data public and [PITC](http://pitc-pec.com/) for providing me the opportunity on working on these data. \n\n\n### Inspiration\n\nI was tasked to do some digging on Engineering data in Pakistan as we currently don't have the infrastructure for collecting and maintaining datasets.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1490531, "CreatorUserId": 5769741, "OwnerUserId": 5769741.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2462462.0, "CurrentDatasourceVersionId": 2504889.0, "ForumId": 1510234, "Type": 2, "CreationDate": "07/25/2021 19:03:02", "LastActivityDate": "07/25/2021", "TotalViews": 2328, "TotalDownloads": 108, "TotalVotes": 9, "TotalKernels": 4}]
[{"Id": 5769741, "UserName": "kingabzpro", "DisplayName": "Abid Ali Awan", "RegisterDate": "09/13/2020", "PerformanceTier": 2}]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session df = pd.read_csv("/kaggle/input/pakistan-engineers-data/PITC_Engineering.csv") df.head() # # 0. Data Cleaning df = df.drop("Unnamed: 0", axis=1) df.describe() df.isnull().sum() df = df.dropna(how="any") df.dtypes from datetime import datetime as dt df["Employment_Start_Date"] = pd.to_datetime(df["Employment_Start_Date"]) df["Employment_End_Date"] = pd.to_datetime(df["Employment_End_Date"]) df["Employment_Start_Date"] = df["Employment_Start_Date"].dt.strftime("%Y-%m-%d") df["Employment_End_Date"] = df["Employment_End_Date"].dt.strftime("%Y-%m-%d") df["Employment_period"] = df["Employment_period"].str.replace(" days", "") df["Employment_period"] = df["Employment_period"].astype(int) df = df[df.Employment_period > 0] df.describe() import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go import seaborn as sns # # 1. Cities where firms are located plt.figure(figsize=(20, 10)) df.groupby("Firms_City")["ID"].count().sort_values(ascending=False).plot.bar() # # 2. Employment Duration # # 2-1. Years from decimal import Decimal, ROUND_HALF_UP, ROUND_HALF_EVEN df["period_y"] = df["Employment_period"] / 365 def period(ex): if ex < 1: return 0 elif 1 <= ex < 2: return 1 elif 2 <= ex < 3: return 2 elif 3 <= ex < 4: return 3 elif 4 <= ex < 5: return 4 else: return 5 df["period"] = df["period_y"].apply(period) plt.figure(figsize=(10, 10)) sns.countplot(y="period", data=df) # # 2-2. Days plt.figure(figsize=(10, 30)) sns.countplot(y="Employment_period", data=df) # # 2-3. Engineering Degree and Emplyment Period df.groupby("Engineering_Degree")["Employment_period"].mean().plot.bar() # # 2-4. Firm Category and Emplyment Period df.groupby("Firm_Category")["Employment_period"].mean().plot.bar() # # 3. Engineering Degrees # # 3-1. Engineering Degrees and Firms City df_dc = ( df[["Firms_City", "Engineering_Degree"]] .value_counts() .rename_axis(["Firms_City", "Engineering_Degree"]) .reset_index(name="counts")[:100] ) plt.figure(figsize=(12, 12)) plt.legend(fontsize=10) plt.tick_params(labelsize=10) ax = sns.scatterplot( x="Firms_City", y="counts", hue="Engineering_Degree", size="counts", data=df_dc, sizes=(50, 500), ) plt.xticks(rotation=90) ax.legend(loc="upper left", bbox_to_anchor=(1, 1)) # # 3-2. Engineering Degrees and Firms Category df_dc = ( df[["Firm_Category", "Engineering_Degree"]] .value_counts() .rename_axis(["Firm_Category", "Engineering_Degree"]) .reset_index(name="counts")[:100] ) plt.figure(figsize=(12, 12)) plt.legend(fontsize=10) plt.tick_params(labelsize=10) ax = sns.scatterplot( x="Firm_Category", y="counts", hue="Engineering_Degree", size="counts", data=df_dc, sizes=(50, 500), ) plt.xticks(rotation=90) ax.legend(loc="upper left", bbox_to_anchor=(1, 1)) # # 4. 
Firms Cities and Firms Category df_CC = ( df[["Firms_City", "Firm_Category"]] .value_counts() .rename_axis(["Firms_City", "Firm_Category"]) .reset_index(name="counts")[:100] ) fig = px.bar(df_CC, x="Firms_City", y="counts", color="Firm_Category") fig.show() # # 5. Engineering Degree and Firms Category df_DC = ( df[["Engineering_Degree", "Firm_Category"]] .value_counts() .rename_axis(["Engineering_Degree", "Firm_Category"]) .reset_index(name="counts")[:100] ) fig = px.bar(df_DC, x="Engineering_Degree", y="counts", color="Firm_Category") fig.show() # # 6. Longitude and Latitude # # 6-1. Longitude, Latitude and Province plt.figure(figsize=(30, 30)) sns.lmplot(x="Longitude", y="Latitude", hue="Province", fit_reg=False, data=df) # # 6-2. Longitude, Latitude and Engineering Degree plt.figure(figsize=(30, 30)) sns.lmplot( x="Longitude", y="Latitude", hue="Engineering_Degree", fit_reg=False, data=df ) # # 6-3. Longitude, Latitude and Firm Category plt.figure(figsize=(30, 30)) sns.lmplot(x="Longitude", y="Latitude", hue="Firm_Category", fit_reg=False, data=df)
[{"pakistan-engineers-data/PITC_Engineering.csv": {"column_names": "[\"Unnamed: 0\", \"ID\", \"Engineer_Name\", \"Father_Name\", \"Company_Name\", \"Employment_Start_Date\", \"Employment_End_Date\", \"Employment_period\", \"Engineering_Degree\", \"PEC_No\", \"Firm_Category\", \"Firms_City\", \"License_no\", \"Latitude\", \"Longitude\", \"Province\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"ID\": \"float64\", \"Engineer_Name\": \"object\", \"Father_Name\": \"object\", \"Company_Name\": \"object\", \"Employment_Start_Date\": \"object\", \"Employment_End_Date\": \"object\", \"Employment_period\": \"object\", \"Engineering_Degree\": \"object\", \"PEC_No\": \"float64\", \"Firm_Category\": \"object\", \"Firms_City\": \"object\", \"License_no\": \"float64\", \"Latitude\": \"float64\", \"Longitude\": \"float64\", \"Province\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 12861 entries, 0 to 12860\nData columns (total 16 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 12861 non-null int64 \n 1 ID 12556 non-null float64\n 2 Engineer_Name 12556 non-null object \n 3 Father_Name 12556 non-null object \n 4 Company_Name 12556 non-null object \n 5 Employment_Start_Date 12556 non-null object \n 6 Employment_End_Date 12556 non-null object \n 7 Employment_period 12556 non-null object \n 8 Engineering_Degree 12556 non-null object \n 9 PEC_No 12556 non-null float64\n 10 Firm_Category 12556 non-null object \n 11 Firms_City 12556 non-null object \n 12 License_no 12556 non-null float64\n 13 Latitude 10253 non-null float64\n 14 Longitude 10253 non-null float64\n 15 Province 10253 non-null object \ndtypes: float64(5), int64(1), object(10)\nmemory usage: 1.6+ MB\n", "summary": "{\"Unnamed: 0\": {\"count\": 12861.0, \"mean\": 6430.0, \"std\": 3712.795240785573, \"min\": 0.0, \"25%\": 3215.0, \"50%\": 6430.0, \"75%\": 9645.0, \"max\": 12860.0}, \"ID\": {\"count\": 12556.0, \"mean\": 6258.424657534247, \"std\": 3612.3243675420667, \"min\": 1.0, \"25%\": 3128.75, \"50%\": 6260.5, \"75%\": 9384.25, \"max\": 12516.0}, \"PEC_No\": {\"count\": 12556.0, \"mean\": 34041.856562599554, \"std\": 20167.2778422365, \"min\": 3.0, \"25%\": 16569.5, \"50%\": 37036.5, \"75%\": 48368.5, \"max\": 77947.0}, \"License_no\": {\"count\": 12556.0, \"mean\": 16989.195603695443, \"std\": 21956.747670626744, \"min\": 1.0, \"25%\": 2177.0, \"50%\": 9470.0, \"75%\": 16808.0, \"max\": 73187.0}, \"Latitude\": {\"count\": 10253.0, \"mean\": 31.287554959524044, \"std\": 2.895927011368494, \"min\": 24.6558, \"25%\": 30.192, \"50%\": 31.5497, \"75%\": 33.6989, \"max\": 35.9208}, \"Longitude\": {\"count\": 10253.0, \"mean\": 71.5645240027309, \"std\": 2.6918561967656855, \"min\": 64.0903, \"25%\": 69.4486, \"50%\": 73.0369, \"75%\": 73.2527, \"max\": 74.873}}", "examples": "{\"Unnamed: 0\":{\"0\":0,\"1\":1,\"2\":2,\"3\":3},\"ID\":{\"0\":1.0,\"1\":2.0,\"2\":3.0,\"3\":4.0},\"Engineer_Name\":{\"0\":\"ENGR. ASADULLAH FAROOQ\",\"1\":\"ENGR. USAMA HAMID\",\"2\":\"ENGR. SHAHID KHURSHID\",\"3\":\"ENGR. BABAR SHAFIQUE\"},\"Father_Name\":{\"0\":\"CH. 
MOHAMMAD FAROOQ\",\"1\":\"ABDUL HAMID\",\"2\":\"MUHAMMAD KHURSHID\",\"3\":\"MUHAMMAD SHAFIQUE\"},\"Company_Name\":{\"0\":\"NADEEM CONSTRUCTION COMPANY\",\"1\":\"SHABBIR CONTRACTOR\",\"2\":\"IMAN GROUP\",\"3\":\"NOOR BUILDERS & CO\"},\"Employment_Start_Date\":{\"0\":\"2019-07-15\",\"1\":\"2019-07-23\",\"2\":\"2019-07-19\",\"3\":\"2019-09-24\"},\"Employment_End_Date\":{\"0\":\"2020-06-30\",\"1\":\"2020-06-30\",\"2\":\"2020-06-30\",\"3\":\"2020-06-30\"},\"Employment_period\":{\"0\":\"351 days\",\"1\":\"343 days\",\"2\":\"347 days\",\"3\":\"280 days\"},\"Engineering_Degree\":{\"0\":\"AERO\",\"1\":\"AERO\",\"2\":\"AERO\",\"3\":\"AERO\"},\"PEC_No\":{\"0\":720.0,\"1\":1866.0,\"2\":2211.0,\"3\":2561.0},\"Firm_Category\":{\"0\":\"C2\",\"1\":\"C4\",\"2\":\"C4\",\"3\":\"C6\"},\"Firms_City\":{\"0\":\"bahawalpur\",\"1\":\"muzaffarabad\",\"2\":\"islamabad\",\"3\":\"mirpur khas\"},\"License_no\":{\"0\":967.0,\"1\":12341.0,\"2\":9590.0,\"3\":71292.0},\"Latitude\":{\"0\":29.3956,\"1\":34.37,\"2\":33.6989,\"3\":25.5269},\"Longitude\":{\"0\":71.6722,\"1\":73.4711,\"2\":73.0369,\"3\":69.0111},\"Province\":{\"0\":\"Punjab\",\"1\":\"Azad Kashmir\",\"2\":\"Isl\\u0101m\\u0101b\\u0101d\",\"3\":\"Sindh\"}}"}}]
true
1
<start_data_description><data_path>pakistan-engineers-data/PITC_Engineering.csv: <column_names> ['Unnamed: 0', 'ID', 'Engineer_Name', 'Father_Name', 'Company_Name', 'Employment_Start_Date', 'Employment_End_Date', 'Employment_period', 'Engineering_Degree', 'PEC_No', 'Firm_Category', 'Firms_City', 'License_no', 'Latitude', 'Longitude', 'Province'] <column_types> {'Unnamed: 0': 'int64', 'ID': 'float64', 'Engineer_Name': 'object', 'Father_Name': 'object', 'Company_Name': 'object', 'Employment_Start_Date': 'object', 'Employment_End_Date': 'object', 'Employment_period': 'object', 'Engineering_Degree': 'object', 'PEC_No': 'float64', 'Firm_Category': 'object', 'Firms_City': 'object', 'License_no': 'float64', 'Latitude': 'float64', 'Longitude': 'float64', 'Province': 'object'} <dataframe_Summary> {'Unnamed: 0': {'count': 12861.0, 'mean': 6430.0, 'std': 3712.795240785573, 'min': 0.0, '25%': 3215.0, '50%': 6430.0, '75%': 9645.0, 'max': 12860.0}, 'ID': {'count': 12556.0, 'mean': 6258.424657534247, 'std': 3612.3243675420667, 'min': 1.0, '25%': 3128.75, '50%': 6260.5, '75%': 9384.25, 'max': 12516.0}, 'PEC_No': {'count': 12556.0, 'mean': 34041.856562599554, 'std': 20167.2778422365, 'min': 3.0, '25%': 16569.5, '50%': 37036.5, '75%': 48368.5, 'max': 77947.0}, 'License_no': {'count': 12556.0, 'mean': 16989.195603695443, 'std': 21956.747670626744, 'min': 1.0, '25%': 2177.0, '50%': 9470.0, '75%': 16808.0, 'max': 73187.0}, 'Latitude': {'count': 10253.0, 'mean': 31.287554959524044, 'std': 2.895927011368494, 'min': 24.6558, '25%': 30.192, '50%': 31.5497, '75%': 33.6989, 'max': 35.9208}, 'Longitude': {'count': 10253.0, 'mean': 71.5645240027309, 'std': 2.6918561967656855, 'min': 64.0903, '25%': 69.4486, '50%': 73.0369, '75%': 73.2527, 'max': 74.873}} <dataframe_info> RangeIndex: 12861 entries, 0 to 12860 Data columns (total 16 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 12861 non-null int64 1 ID 12556 non-null float64 2 Engineer_Name 12556 non-null object 3 Father_Name 12556 non-null object 4 Company_Name 12556 non-null object 5 Employment_Start_Date 12556 non-null object 6 Employment_End_Date 12556 non-null object 7 Employment_period 12556 non-null object 8 Engineering_Degree 12556 non-null object 9 PEC_No 12556 non-null float64 10 Firm_Category 12556 non-null object 11 Firms_City 12556 non-null object 12 License_no 12556 non-null float64 13 Latitude 10253 non-null float64 14 Longitude 10253 non-null float64 15 Province 10253 non-null object dtypes: float64(5), int64(1), object(10) memory usage: 1.6+ MB <some_examples> {'Unnamed: 0': {'0': 0, '1': 1, '2': 2, '3': 3}, 'ID': {'0': 1.0, '1': 2.0, '2': 3.0, '3': 4.0}, 'Engineer_Name': {'0': 'ENGR. ASADULLAH FAROOQ', '1': 'ENGR. USAMA HAMID', '2': 'ENGR. SHAHID KHURSHID', '3': 'ENGR. BABAR SHAFIQUE'}, 'Father_Name': {'0': 'CH. 
MOHAMMAD FAROOQ', '1': 'ABDUL HAMID', '2': 'MUHAMMAD KHURSHID', '3': 'MUHAMMAD SHAFIQUE'}, 'Company_Name': {'0': 'NADEEM CONSTRUCTION COMPANY', '1': 'SHABBIR CONTRACTOR', '2': 'IMAN GROUP', '3': 'NOOR BUILDERS & CO'}, 'Employment_Start_Date': {'0': '2019-07-15', '1': '2019-07-23', '2': '2019-07-19', '3': '2019-09-24'}, 'Employment_End_Date': {'0': '2020-06-30', '1': '2020-06-30', '2': '2020-06-30', '3': '2020-06-30'}, 'Employment_period': {'0': '351 days', '1': '343 days', '2': '347 days', '3': '280 days'}, 'Engineering_Degree': {'0': 'AERO', '1': 'AERO', '2': 'AERO', '3': 'AERO'}, 'PEC_No': {'0': 720.0, '1': 1866.0, '2': 2211.0, '3': 2561.0}, 'Firm_Category': {'0': 'C2', '1': 'C4', '2': 'C4', '3': 'C6'}, 'Firms_City': {'0': 'bahawalpur', '1': 'muzaffarabad', '2': 'islamabad', '3': 'mirpur khas'}, 'License_no': {'0': 967.0, '1': 12341.0, '2': 9590.0, '3': 71292.0}, 'Latitude': {'0': 29.3956, '1': 34.37, '2': 33.6989, '3': 25.5269}, 'Longitude': {'0': 71.6722, '1': 73.4711, '2': 73.0369, '3': 69.0111}, 'Province': {'0': 'Punjab', '1': 'Azad Kashmir', '2': 'Islāmābād', '3': 'Sindh'}} <end_description>
1,691
4
3,316
1,691
69433474
<jupyter_start><jupyter_text>Wine Quality **Data Set Information:** The dataset was downloaded from the UCI Machine Learning Repository. The two datasets are related to red and white variants of the Portuguese "Vinho Verde" wine. The reference [Cortez et al., 2009]. Due to privacy and logistic issues, only physicochemical (inputs) and sensory (the output) variables are available (e.g. there is no data about grape types, wine brand, wine selling price, etc.). These datasets can be viewed as classification or regression tasks. The classes are ordered and not balanced (e.g. there are munch more normal wines than excellent or poor ones). Outlier detection algorithms could be used to detect the few excellent or poor wines. Also, we are not sure if all input variables are relevant. So it could be interesting to test feature selection methods. Two datasets were combined and few values were randomly removed. **Attribute Information:** For more information, read [Cortez et al., 2009]. Input variables (based on physicochemical tests): 1 - fixed acidity 2 - volatile acidity 3 - citric acid 4 - residual sugar 5 - chlorides 6 - free sulfur dioxide 7 - total sulfur dioxide 8 - density 9 - pH 10 - sulphates 11 - alcohol Output variable (based on sensory data): 12 - quality (score between 0 and 10) **Acknowledgements:** P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. Modeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009. Kaggle dataset identifier: wine-quality <jupyter_code>import pandas as pd df = pd.read_csv('wine-quality/winequalityN.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 6497 entries, 0 to 6496 Data columns (total 13 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 type 6497 non-null object 1 fixed acidity 6487 non-null float64 2 volatile acidity 6489 non-null float64 3 citric acid 6494 non-null float64 4 residual sugar 6495 non-null float64 5 chlorides 6495 non-null float64 6 free sulfur dioxide 6497 non-null float64 7 total sulfur dioxide 6497 non-null float64 8 density 6497 non-null float64 9 pH 6488 non-null float64 10 sulphates 6493 non-null float64 11 alcohol 6497 non-null float64 12 quality 6497 non-null int64 dtypes: float64(11), int64(1), object(1) memory usage: 660.0+ KB <jupyter_text>Examples: { "type": "white", "fixed acidity": 7.0, "volatile acidity": 0.27, "citric acid": 0.36, "residual sugar": 20.7, "chlorides": 0.045, "free sulfur dioxide": 45, "total sulfur dioxide": 170, "density": 1.001, "pH": 3.0, "sulphates": 0.45, "alcohol": 8.8, "quality": 6 } { "type": "white", "fixed acidity": 6.3, "volatile acidity": 0.30000000000000004, "citric acid": 0.34, "residual sugar": 1.6, "chlorides": 0.049, "free sulfur dioxide": 14, "total sulfur dioxide": 132, "density": 0.994, "pH": 3.3, "sulphates": 0.49, "alcohol": 9.5, "quality": 6 } { "type": "white", "fixed acidity": 8.1, "volatile acidity": 0.28, "citric acid": 0.4, "residual sugar": 6.9, "chlorides": 0.05, "free sulfur dioxide": 30, "total sulfur dioxide": 97, "density": 0.9951000000000001, "pH": 3.26, "sulphates": 0.44, "alcohol": 10.1, "quality": 6 } { "type": "white", "fixed acidity": 7.2, "volatile acidity": 0.23, "citric acid": 0.32, "residual sugar": 8.5, "chlorides": 0.058, "free sulfur dioxide": 47, "total sulfur dioxide": 186, "density": 0.9956, "pH": 3.19, "sulphates": 0.4, "alcohol": 9.9, "quality": 6 } <jupyter_script># Dataset is taken from 
Kaggle (https://www.kaggle.com/rajyellow46/wine-quality). The two datasets are related to red and white variants of the Portuguese "Vinho Verde" wine. The reference is [Cortez et al., 2009]. Due to privacy and logistic issues, only physicochemical (inputs) and sensory (the output) variables are available (e.g. there is no data about grape types, wine brand, wine selling price, etc.). # These datasets can be viewed as classification or regression tasks. The dataset has several different variables, some of which are correlated with each other. Let's perform some analysis and check how well the data can predict wine quality. # First we have to import libraries; these libraries help us to import the data and also help us to do the analysis. import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import matplotlib from matplotlib import pyplot as plt import seaborn as sns sns.set(color_codes=True) from sklearn.linear_model import LinearRegression, SGDClassifier, RidgeClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier df = pd.read_csv("/kaggle/input/wine-quality/winequalityN.csv") df.head() ## check NaN values for i in df.columns: print(i + ": " + str(df[i].isna().sum())) # Correlation gives us the relation between each pair of variables and shows how much each variable contributes. # Correlation also shows how each feature depends on the others; from this we will find the collinearity between features. If collinearity is more than 0.5 that can lead to problems, but we can avoid that problem by dropping features which are highly correlated with each other. correlation = df.corr() plt.figure(figsize=(15, 8)) sns.heatmap(correlation, annot=True, cmap="Blues") df.head() # With the help of the correlation matrix, check whether the columns containing NaN values have low influence; in that case simply dropping those rows would be fine, but here we will go with replacement by the median value of the particular feature df["pH"] = df["pH"].fillna(df["pH"].median()) df["sulphates"] = df["sulphates"].fillna(df["sulphates"].median()) df["chlorides"] = df["chlorides"].fillna(df["chlorides"].median()) df["residual sugar"] = df["residual sugar"].fillna(df["residual sugar"].median()) df["citric acid"] = df["citric acid"].fillna(df["citric acid"].median()) df["volatile acidity"] = df["volatile acidity"].fillna(df["volatile acidity"].median()) df["fixed acidity"] = df["fixed acidity"].fillna(df["fixed acidity"].median()) x = np.unique(df["quality"]) x # Now as we can see the quality score varies between 3 and 8; since low quality wine has a low score and high quality wine has a high score, we will assign a class to each score and try to predict the classes.
def values(x): if x <= 5: x = "low" elif x > 5 and x < 7: x = "medium" else: x = "high" return x df["level"] = df["quality"].apply(lambda x: values(x)) # Using a preprocessing method, convert the quality classes into a numerical variable by applying label (ordinal) encoding. label = LabelEncoder() quality_score = label.fit_transform(df["level"]) print(quality_score) print((label.classes_)) # The seaborn package gives us nice visualizations; here the barplot helps us to see how much alcohol each class has. plt.figure(figsize=(15, 8)) ax = sns.barplot(x="level", y="alcohol", data=df) # Again we will check how much sulphate is used in each class and which class has used more sulphate. plt.figure(figsize=(15, 8)) ax = sns.barplot(x="level", y="sulphates", data=df) ax = sns.countplot(x="level", data=df, palette="Set3") # Outliers lead to errors in the data; to avoid that, first check whether outliers are present in the data, and if they are, try to remove them to avoid those errors. # Outliers are spotted using histograms drawn with matplotlib; we will also check how each variable is spread and, based on that, decide which algorithm is best suited for predicting accurate values. df.hist(bins=10, figsize=(15, 12)) plt.show() # In the introduction part we already discussed the type of wine present in the data, so we will use dummy encoding to convert the categorical feature into a numerical one. df["type"] = pd.get_dummies(df["type"], drop_first=True) x = df.iloc[:, :-2] x.head() ax = sns.countplot(x="type", data=df, palette="Set3") # From the above count plot you can see most of the data is from white wine. # To help the optimizer reach the global minimum of the cost function, and because some features in the dataset have much larger values than others, we will perform feature scaling standard = StandardScaler() std_x = standard.fit_transform(x) x_train, x_test, y_train, y_test = train_test_split( std_x, quality_score, test_size=0.20, random_state=40 ) print("Training data:{}".format(x_train.shape)) print("Test data:{}".format(x_test.shape)) results = [] clf = SGDClassifier(max_iter=10000, random_state=0) clf.fit(x_train, y_train) y_predicted = clf.predict(x_test) score = clf.score(x_test, y_test) print(score) results.append(score) clf_1 = RidgeClassifier(alpha=2, max_iter=10000) clf_1.fit(x_train, y_train) y_predicted = clf_1.predict(x_test) score = clf_1.score(x_test, y_test) print(score) results.append(score) clf = LogisticRegression(max_iter=10000, solver="newton-cg", random_state=0, n_jobs=2) clf.fit(x_train, y_train) y_predicted = clf.predict(x_test) score = clf.score(x_test, y_test) print(score) results.append(score) cnf_matrix = confusion_matrix(y_test, y_predicted) np.set_printoptions(precision=2) cnf_matrix import itertools def plot_confusion_matrix( cm, classes, normalize=False, title="Confusion matrix", cmap=plt.cm.Blues ): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.
""" if normalize: cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print("Confusion matrix, without normalization") print(cm) plt.imshow(cm, interpolation="nearest", cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = ".2f" if normalize else "d" thresh = cm.max() / 2.0 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text( j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black", ) plt.ylabel("True label") plt.xlabel("Predicted label") plt.tight_layout() classes = df["level"].value_counts() plt.figure() plot_confusion_matrix( cnf_matrix, classes=classes.index, title="Confusion matrix, without normalization" ) # With normalization plt.figure() plot_confusion_matrix( cnf_matrix, classes=classes.index, normalize=True, title="Normalized confusion matrix", ) plt.show() clf_1 = DecisionTreeClassifier( criterion="entropy", min_samples_split=7, max_depth=8, ) clf_1.fit(x_train, y_train) y_predicted = clf_1.predict(x_test) score = clf_1.score(x_test, y_test) print(score) results.append(score) cnf_matrix = confusion_matrix(y_test, y_predicted) np.set_printoptions(precision=2) cnf_matrix # Build Model clf = RandomForestClassifier( criterion="entropy", bootstrap=False, n_estimators=1000, n_jobs=2, verbose=1, max_features=3, ) clf.fit(x_train, y_train) y_predicted = clf.predict(x_test) score = clf.score(x_test, y_test) results.append(score) print(score) cnf_matrix = confusion_matrix(y_test, y_predicted) np.set_printoptions(precision=2) cnf_matrix import itertools def plot_confusion_matrix( cm, classes, normalize=False, title="Confusion matrix", cmap=plt.cm.Blues ): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print("Confusion matrix, without normalization") print(cm) plt.imshow(cm, interpolation="nearest", cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = ".2f" if normalize else "d" thresh = cm.max() / 2.0 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text( j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black", ) plt.ylabel("True label") plt.xlabel("Predicted label") plt.tight_layout() plt.figure() plot_confusion_matrix( cnf_matrix, classes=classes.index, title="Confusion matrix, without normalization" ) # With normalization plt.figure() plot_confusion_matrix( cnf_matrix, classes=classes.index, normalize=True, title="Normalized confusion matrix", ) plt.show() result_df = pd.DataFrame( { "ML Models": [ "SGDClassifier", "Ridge classifier", "Logistic Regression", "Decision Tree", "Random Forest", ], "Score": results, } ) result_df
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/433/69433474.ipynb
wine-quality
rajyellow46
[{"Id": 69433474, "ScriptId": 18951994, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7098336, "CreationDate": "07/31/2021 01:05:28", "VersionNumber": 2.0, "Title": "EDA_wine_quality", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 298.0, "LinesInsertedFromPrevious": 191.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 107.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 92595141, "KernelVersionId": 69433474, "SourceDatasetVersionId": 52633}]
[{"Id": 52633, "DatasetId": 35901, "DatasourceVersionId": 55023, "CreatorUserId": 1386728, "LicenseName": "Other (specified in description)", "CreationDate": "07/09/2018 16:08:15", "VersionNumber": 1.0, "Title": "Wine Quality", "Slug": "wine-quality", "Subtitle": NaN, "Description": "**Data Set Information:**\n\nThe dataset was downloaded from the UCI Machine Learning Repository. \n\nThe two datasets are related to red and white variants of the Portuguese \"Vinho Verde\" wine. The reference [Cortez et al., 2009]. Due to privacy and logistic issues, only physicochemical (inputs) and sensory (the output) variables are available (e.g. there is no data about grape types, wine brand, wine selling price, etc.). \n\nThese datasets can be viewed as classification or regression tasks. The classes are ordered and not balanced (e.g. there are munch more normal wines than excellent or poor ones). Outlier detection algorithms could be used to detect the few excellent or poor wines. Also, we are not sure if all input variables are relevant. So it could be interesting to test feature selection methods. \n\nTwo datasets were combined and few values were randomly removed.\n\n**Attribute Information:**\n\nFor more information, read [Cortez et al., 2009]. \nInput variables (based on physicochemical tests): \n1 - fixed acidity \n2 - volatile acidity \n3 - citric acid \n4 - residual sugar \n5 - chlorides \n6 - free sulfur dioxide \n7 - total sulfur dioxide \n8 - density \n9 - pH \n10 - sulphates \n11 - alcohol \nOutput variable (based on sensory data): \n12 - quality (score between 0 and 10)\n\n**Acknowledgements:**\n\nP. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. \nModeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009.", "VersionNotes": "Initial release", "TotalCompressedBytes": 390376.0, "TotalUncompressedBytes": 390376.0}]
[{"Id": 35901, "CreatorUserId": 1386728, "OwnerUserId": 1386728.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 52633.0, "CurrentDatasourceVersionId": 55023.0, "ForumId": 44308, "Type": 2, "CreationDate": "07/09/2018 16:08:15", "LastActivityDate": "07/09/2018", "TotalViews": 206536, "TotalDownloads": 31086, "TotalVotes": 229, "TotalKernels": 153}]
[{"Id": 1386728, "UserName": "rajyellow46", "DisplayName": "Raj Parmar", "RegisterDate": "11/03/2017", "PerformanceTier": 0}]
[{"wine-quality/winequalityN.csv": {"column_names": "[\"type\", \"fixed acidity\", \"volatile acidity\", \"citric acid\", \"residual sugar\", \"chlorides\", \"free sulfur dioxide\", \"total sulfur dioxide\", \"density\", \"pH\", \"sulphates\", \"alcohol\", \"quality\"]", "column_data_types": "{\"type\": \"object\", \"fixed acidity\": \"float64\", \"volatile acidity\": \"float64\", \"citric acid\": \"float64\", \"residual sugar\": \"float64\", \"chlorides\": \"float64\", \"free sulfur dioxide\": \"float64\", \"total sulfur dioxide\": \"float64\", \"density\": \"float64\", \"pH\": \"float64\", \"sulphates\": \"float64\", \"alcohol\": \"float64\", \"quality\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 6497 entries, 0 to 6496\nData columns (total 13 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 type 6497 non-null object \n 1 fixed acidity 6487 non-null float64\n 2 volatile acidity 6489 non-null float64\n 3 citric acid 6494 non-null float64\n 4 residual sugar 6495 non-null float64\n 5 chlorides 6495 non-null float64\n 6 free sulfur dioxide 6497 non-null float64\n 7 total sulfur dioxide 6497 non-null float64\n 8 density 6497 non-null float64\n 9 pH 6488 non-null float64\n 10 sulphates 6493 non-null float64\n 11 alcohol 6497 non-null float64\n 12 quality 6497 non-null int64 \ndtypes: float64(11), int64(1), object(1)\nmemory usage: 660.0+ KB\n", "summary": "{\"fixed acidity\": {\"count\": 6487.0, \"mean\": 7.2165793124710955, \"std\": 1.296749856526477, \"min\": 3.8, \"25%\": 6.4, \"50%\": 7.0, \"75%\": 7.7, \"max\": 15.9}, \"volatile acidity\": {\"count\": 6489.0, \"mean\": 0.33969101556480197, \"std\": 0.16464902864429282, \"min\": 0.08, \"25%\": 0.23, \"50%\": 0.29, \"75%\": 0.4, \"max\": 1.58}, \"citric acid\": {\"count\": 6494.0, \"mean\": 0.3187218971358176, \"std\": 0.14526480053397792, \"min\": 0.0, \"25%\": 0.25, \"50%\": 0.31, \"75%\": 0.39, \"max\": 1.66}, \"residual sugar\": {\"count\": 6495.0, \"mean\": 5.444326404926867, \"std\": 4.7581247426727105, \"min\": 0.6, \"25%\": 1.8, \"50%\": 3.0, \"75%\": 8.1, \"max\": 65.8}, \"chlorides\": {\"count\": 6495.0, \"mean\": 0.05604157043879908, \"std\": 0.03503602522758981, \"min\": 0.009, \"25%\": 0.038, \"50%\": 0.047, \"75%\": 0.065, \"max\": 0.611}, \"free sulfur dioxide\": {\"count\": 6497.0, \"mean\": 30.525319378174544, \"std\": 17.7493997720025, \"min\": 1.0, \"25%\": 17.0, \"50%\": 29.0, \"75%\": 41.0, \"max\": 289.0}, \"total sulfur dioxide\": {\"count\": 6497.0, \"mean\": 115.7445744189626, \"std\": 56.52185452263028, \"min\": 6.0, \"25%\": 77.0, \"50%\": 118.0, \"75%\": 156.0, \"max\": 440.0}, \"density\": {\"count\": 6497.0, \"mean\": 0.9946966338309989, \"std\": 0.002998673003719039, \"min\": 0.98711, \"25%\": 0.99234, \"50%\": 0.99489, \"75%\": 0.99699, \"max\": 1.03898}, \"pH\": {\"count\": 6488.0, \"mean\": 3.218395499383477, \"std\": 0.1607483065508832, \"min\": 2.72, \"25%\": 3.11, \"50%\": 3.21, \"75%\": 3.32, \"max\": 4.01}, \"sulphates\": {\"count\": 6493.0, \"mean\": 0.531215154782073, \"std\": 0.14881412131628377, \"min\": 0.22, \"25%\": 0.43, \"50%\": 0.51, \"75%\": 0.6, \"max\": 2.0}, \"alcohol\": {\"count\": 6497.0, \"mean\": 10.491800831149455, \"std\": 1.192711748868981, \"min\": 8.0, \"25%\": 9.5, \"50%\": 10.3, \"75%\": 11.3, \"max\": 14.9}, \"quality\": {\"count\": 6497.0, \"mean\": 5.818377712790519, \"std\": 0.8732552715311248, \"min\": 3.0, \"25%\": 5.0, \"50%\": 6.0, \"75%\": 6.0, \"max\": 9.0}}", "examples": 
"{\"type\":{\"0\":\"white\",\"1\":\"white\",\"2\":\"white\",\"3\":\"white\"},\"fixed acidity\":{\"0\":7.0,\"1\":6.3,\"2\":8.1,\"3\":7.2},\"volatile acidity\":{\"0\":0.27,\"1\":0.3,\"2\":0.28,\"3\":0.23},\"citric acid\":{\"0\":0.36,\"1\":0.34,\"2\":0.4,\"3\":0.32},\"residual sugar\":{\"0\":20.7,\"1\":1.6,\"2\":6.9,\"3\":8.5},\"chlorides\":{\"0\":0.045,\"1\":0.049,\"2\":0.05,\"3\":0.058},\"free sulfur dioxide\":{\"0\":45.0,\"1\":14.0,\"2\":30.0,\"3\":47.0},\"total sulfur dioxide\":{\"0\":170.0,\"1\":132.0,\"2\":97.0,\"3\":186.0},\"density\":{\"0\":1.001,\"1\":0.994,\"2\":0.9951,\"3\":0.9956},\"pH\":{\"0\":3.0,\"1\":3.3,\"2\":3.26,\"3\":3.19},\"sulphates\":{\"0\":0.45,\"1\":0.49,\"2\":0.44,\"3\":0.4},\"alcohol\":{\"0\":8.8,\"1\":9.5,\"2\":10.1,\"3\":9.9},\"quality\":{\"0\":6,\"1\":6,\"2\":6,\"3\":6}}"}}]
true
1
<start_data_description><data_path>wine-quality/winequalityN.csv: <column_names> ['type', 'fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol', 'quality'] <column_types> {'type': 'object', 'fixed acidity': 'float64', 'volatile acidity': 'float64', 'citric acid': 'float64', 'residual sugar': 'float64', 'chlorides': 'float64', 'free sulfur dioxide': 'float64', 'total sulfur dioxide': 'float64', 'density': 'float64', 'pH': 'float64', 'sulphates': 'float64', 'alcohol': 'float64', 'quality': 'int64'} <dataframe_Summary> {'fixed acidity': {'count': 6487.0, 'mean': 7.2165793124710955, 'std': 1.296749856526477, 'min': 3.8, '25%': 6.4, '50%': 7.0, '75%': 7.7, 'max': 15.9}, 'volatile acidity': {'count': 6489.0, 'mean': 0.33969101556480197, 'std': 0.16464902864429282, 'min': 0.08, '25%': 0.23, '50%': 0.29, '75%': 0.4, 'max': 1.58}, 'citric acid': {'count': 6494.0, 'mean': 0.3187218971358176, 'std': 0.14526480053397792, 'min': 0.0, '25%': 0.25, '50%': 0.31, '75%': 0.39, 'max': 1.66}, 'residual sugar': {'count': 6495.0, 'mean': 5.444326404926867, 'std': 4.7581247426727105, 'min': 0.6, '25%': 1.8, '50%': 3.0, '75%': 8.1, 'max': 65.8}, 'chlorides': {'count': 6495.0, 'mean': 0.05604157043879908, 'std': 0.03503602522758981, 'min': 0.009, '25%': 0.038, '50%': 0.047, '75%': 0.065, 'max': 0.611}, 'free sulfur dioxide': {'count': 6497.0, 'mean': 30.525319378174544, 'std': 17.7493997720025, 'min': 1.0, '25%': 17.0, '50%': 29.0, '75%': 41.0, 'max': 289.0}, 'total sulfur dioxide': {'count': 6497.0, 'mean': 115.7445744189626, 'std': 56.52185452263028, 'min': 6.0, '25%': 77.0, '50%': 118.0, '75%': 156.0, 'max': 440.0}, 'density': {'count': 6497.0, 'mean': 0.9946966338309989, 'std': 0.002998673003719039, 'min': 0.98711, '25%': 0.99234, '50%': 0.99489, '75%': 0.99699, 'max': 1.03898}, 'pH': {'count': 6488.0, 'mean': 3.218395499383477, 'std': 0.1607483065508832, 'min': 2.72, '25%': 3.11, '50%': 3.21, '75%': 3.32, 'max': 4.01}, 'sulphates': {'count': 6493.0, 'mean': 0.531215154782073, 'std': 0.14881412131628377, 'min': 0.22, '25%': 0.43, '50%': 0.51, '75%': 0.6, 'max': 2.0}, 'alcohol': {'count': 6497.0, 'mean': 10.491800831149455, 'std': 1.192711748868981, 'min': 8.0, '25%': 9.5, '50%': 10.3, '75%': 11.3, 'max': 14.9}, 'quality': {'count': 6497.0, 'mean': 5.818377712790519, 'std': 0.8732552715311248, 'min': 3.0, '25%': 5.0, '50%': 6.0, '75%': 6.0, 'max': 9.0}} <dataframe_info> RangeIndex: 6497 entries, 0 to 6496 Data columns (total 13 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 type 6497 non-null object 1 fixed acidity 6487 non-null float64 2 volatile acidity 6489 non-null float64 3 citric acid 6494 non-null float64 4 residual sugar 6495 non-null float64 5 chlorides 6495 non-null float64 6 free sulfur dioxide 6497 non-null float64 7 total sulfur dioxide 6497 non-null float64 8 density 6497 non-null float64 9 pH 6488 non-null float64 10 sulphates 6493 non-null float64 11 alcohol 6497 non-null float64 12 quality 6497 non-null int64 dtypes: float64(11), int64(1), object(1) memory usage: 660.0+ KB <some_examples> {'type': {'0': 'white', '1': 'white', '2': 'white', '3': 'white'}, 'fixed acidity': {'0': 7.0, '1': 6.3, '2': 8.1, '3': 7.2}, 'volatile acidity': {'0': 0.27, '1': 0.3, '2': 0.28, '3': 0.23}, 'citric acid': {'0': 0.36, '1': 0.34, '2': 0.4, '3': 0.32}, 'residual sugar': {'0': 20.7, '1': 1.6, '2': 6.9, '3': 8.5}, 'chlorides': {'0': 0.045, '1': 0.049, '2': 0.05, '3': 
0.058}, 'free sulfur dioxide': {'0': 45.0, '1': 14.0, '2': 30.0, '3': 47.0}, 'total sulfur dioxide': {'0': 170.0, '1': 132.0, '2': 97.0, '3': 186.0}, 'density': {'0': 1.001, '1': 0.994, '2': 0.9951, '3': 0.9956}, 'pH': {'0': 3.0, '1': 3.3, '2': 3.26, '3': 3.19}, 'sulphates': {'0': 0.45, '1': 0.49, '2': 0.44, '3': 0.4}, 'alcohol': {'0': 8.8, '1': 9.5, '2': 10.1, '3': 9.9}, 'quality': {'0': 6, '1': 6, '2': 6, '3': 6}} <end_description>
2,944
2
4,395
2,944
69433313
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt

plt.style.use("bmh")
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold, GridSearchCV, train_test_split
from sklearn.ensemble import (
    RandomForestClassifier,
    AdaBoostClassifier,
    GradientBoostingClassifier,
)
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
import time

import os

for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

df = pd.read_csv("/kaggle/input/dont-overfit-ii/train.csv", index_col="id")
x_test = pd.read_csv("/kaggle/input/dont-overfit-ii/test.csv", index_col="id")
display(df.head())
display(x_test.head())
display(df.describe().T)
print(df.info())
print(df.select_dtypes("object"))
print(df.shape)
print(x_test.shape)
df.isnull().sum()

sns.catplot(x="target", kind="count", palette="ch:.25", data=df)

# grid = sns.PairGrid(df.iloc[:,1:20])
# grid.map_offdiag(sns.scatterplot)
# grid.map_diag(sns.histplot)
# grid.add_legend()

# # see the percentage of missing values
print(df.isnull().sum())
titanic_null_count = df.isnull().sum() * 100 / len(df)
miss_titanic_plot = titanic_null_count.sort_values(ascending=False)[:10].plot(
    kind="barh"
)
print(titanic_null_count.sort_values(ascending=False))

# correlation=abs(df.corr()['target']) >= 0.4
# correlation[correlation==True]
df.shape

# **data has 0 missing values**
Y = df["target"]
x = df.drop(["target"], axis=1)

over = SMOTE()
under = RandomUnderSampler()
steps = [("o", over), ("u", under)]
pipeline = Pipeline(steps=steps)
x, Y = pipeline.fit_resample(x, Y)
print(x.shape)
print(Y.shape)
display(x.describe().T)

sns.catplot(x=Y, kind="count", palette="ch:.25", data=df)

# MI_score = mutual_info_classif(x, y, random_state=0)
# there is a problem in the next line; I could not get it to return a list of lists properly
# features=[]
# ll={}
# feature_names = x.columns
# # Print the name and mutual information score of each feature
# for feature in zip(feature_names, MI_score):
#     features.append(feature)
# print(features)
# for i in range(len(features)):
#     ll[features[i][1]]=features[i][0]
# for i in sorted (ll.keys()) :
#     print(i, end = " ")

# skb = SelectKBest(score_func=mutual_info_classif, k=20)
# new = pd.DataFrame(skb.fit_transform(x, y))
# print(new.shape)
# new.head()

scaler = StandardScaler()
x_cols = x.columns
x_test_cols = x_test.columns
x = pd.DataFrame(scaler.fit_transform(x.astype(float)), columns=x_cols)
x.index = x.index
x_test = pd.DataFrame(
    scaler.fit_transform(x_test.astype(float)), columns=x_test_cols, index=x_test.index
)

pca = PCA().fit(x_test)
plt.rcParams["figure.figsize"] = (20, 10)
fig, ax = plt.subplots()
y = np.cumsum(pca.explained_variance_ratio_)
plt.ylim(0.0, 1.1)
plt.plot(y, marker="o", linestyle="--", color="b")
plt.xlabel("Number of Components")
plt.ylabel("Cumulative variance (%)")
plt.title("The number of components needed to explain variance")
plt.axhline(y=0.95, color="r", linestyle="-") plt.text(0.5, 0.85, "95% cut-off threshold", color="red", fontsize=16) ax.grid(axis="x") plt.show() def reduce_data(df, comp_number=280): pca = PCA(n_components=comp_number).fit(df) reduced_data = pca.transform(df) reduced_data = pd.DataFrame(reduced_data) print(pca.explained_variance_ratio_.sum()) return reduced_data x_red = reduce_data(x) x_test_red = reduce_data(x_test) x_train, x_val, y_train, y_val = train_test_split( x_red, Y, test_size=0.20, random_state=42, shuffle=True ) random = RandomForestClassifier() logreg = LogisticRegression() abc = AdaBoostClassifier() svm = SVC() gbc = GradientBoostingClassifier() xgb = XGBClassifier() gnb = GaussianNB() dt = DecisionTreeClassifier() class_list = [random, dt, svm, logreg, abc, gbc, xgb] for model in class_list: start = time.time() grid = GridSearchCV( estimator=model, param_grid={}, scoring="roc_auc", cv=5, n_jobs=-1, verbose=3 ) grid.fit(x_red, Y) end = time.time() print(model, "\n", grid.best_score_, "\n", round(end - start)) # param_grid = {'eta': [0.001,0.01, 0.1], # 'gamma': [0,0.5,1]} # kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=1) # best = GridSearchCV(estimator=XGBClassifier(), param_grid=param_grid, scoring='roc_auc', cv=kfold, n_jobs=-1,verbose=3) # best.fit(x_red, Y) # print(best.best_score_,best.best_estimator_) param_grid = { "n_estimators": [50, 100, 150, 200], "min_samples_split": [2, 3], "warm_start": [True, False], } kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=1) best = GridSearchCV( estimator=RandomForestClassifier(), param_grid=param_grid, scoring="roc_auc", cv=kfold, n_jobs=-1, verbose=3, ) best.fit(x_red, Y) print(best.best_score_, best.best_estimator_) param_grid = { "C": [0.01, 0.5, 1, 2, 5, 7, 9], "kernel": ["poly", "sigmoid", "linear", "rbf"], "degree": [2, 3, 4], "gamma": ["scale", "auto"], } kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=1) best = GridSearchCV( estimator=SVC(), param_grid=param_grid, scoring="roc_auc", cv=kfold, n_jobs=-1, verbose=3, ) best.fit(x_red, Y) print(best.best_score_, best.best_estimator_) final_model = XGBClassifier() final_model.fit(x_red, Y) final_predict = final_model.predict(x_test_red) submission = pd.DataFrame({"id": x_test.index, "target": final_predict}) submission[submission["target"] == 1] submission.to_csv("/kaggle/working/SmoteBalance_PCA_XGBoost", index=False)
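# Two hedged follow-up sketches (assumptions for illustration, not part of the original notebook).
# (1) reduce_data() above fits a separate PCA on the train and test sets, so the two end up in
#     different component spaces; fitting one PCA on the training features and reusing that same
#     fitted transform for the test set keeps them consistent. The 280 components mirror the
#     notebook's own default.
pca_shared = PCA(n_components=280).fit(x)
x_red_shared = pd.DataFrame(pca_shared.transform(x))
x_test_red_shared = pd.DataFrame(pca_shared.transform(x_test))

# (2) The grid searches above optimize roc_auc, so a probability-based submission usually matches
#     that objective better than the hard 0/1 labels returned by predict(). LogisticRegression is
#     just an illustrative choice here; the final XGBClassifier could be slotted in the same way.
clf_proba = LogisticRegression(max_iter=10000).fit(x_red_shared, Y)
proba = clf_proba.predict_proba(x_test_red_shared)[:, 1]
proba_submission = pd.DataFrame({"id": x_test.index, "target": proba})
proba_submission.to_csv("/kaggle/working/SharedPCA_LogReg_proba.csv", index=False)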
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/433/69433313.ipynb
null
null
[{"Id": 69433313, "ScriptId": 18944880, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4177452, "CreationDate": "07/31/2021 01:00:04", "VersionNumber": 6.0, "Title": "handle_overfitting", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 209.0, "LinesInsertedFromPrevious": 13.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 196.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,182
0
2,182
2,182
69444908
# # Use BlackJAX with PyMC3 # Author: Kaustubh Chaudhari # BlackJAX can take any log-probability function as long as it is compatible with JAX's JIT. In this notebook we show how we can use PyMC as a modeling language and BlackJAX as an inference library. # For this notebook to run you will need to install PyMC3: # ```bash # pip install pymc3 # ``` import jax import numpy as np import pymc3 as pm import pymc3.sampling_jax import blackjax.nuts as nuts import blackjax.stan_warmup as stan_warmup print(f"Running on PyMC3 v{pm.__version__}") # ## Data # Please refer to the [original TFP example](https://www.tensorflow.org/probability/examples/Eight_Schools) for a description of the problem and the model that is used. # Data of the Eight Schools Model J = 8 y = np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]) sigma = np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]) # # Model # with pm.Model() as model: mu = pm.Normal("mu", mu=0.0, sigma=10.0) tau = pm.HalfCauchy("tau", 5.0) theta = pm.Normal("theta", mu=0, sigma=1, shape=J) theta_1 = mu + tau * theta obs = pm.Normal("obs", mu=theta_1, sigma=sigma, shape=J, observed=y) # # Sampling using native numpyro import numpyro import numpyro.distributions as dist # Eight Schools example def eight_schools(J, sigma, y=None): mu = numpyro.sample("mu", dist.Normal(0, 10)) tau = numpyro.sample("tau", dist.HalfCauchy(5)) theta = numpyro.sample("theta", dist.Normal(0, 1)) with numpyro.plate("J", J): theta_1 = mu + tau * theta numpyro.sample("obs", dist.Normal(theta_1, sigma), obs=y) from jax import random from numpyro.infer import MCMC, NUTS nuts_kernel = NUTS(eight_schools) mcmc = MCMC(nuts_kernel, num_warmup=1000, num_samples=50000) rng_key = random.PRNGKey(0) mcmc.run(rng_key, J, sigma, y=y, extra_fields=("potential_energy",)) # # Sampling using PyMC NUTS Sampler with model: posterior = pm.sample(50_000, chains=1) # # Sampling using PyMC JAX Numpyro NUTS sampler # with model: # hierarchical_trace_jax = pm.sampling_jax.sample_numpyro_nuts( # 50_000, target_accept=0.9, chains=1 # ) # # Sampling using BlackJax # ## Configuring the model for BlackJax # from theano.graph.fg import FunctionGraph from theano.link.jax.jax_dispatch import jax_funcify seed = jax.random.PRNGKey(1234) chains = 1 # Get the FunctionGraph of the model. fgraph = FunctionGraph(model.free_RVs, [model.logpt]) # Jax funcify builds Jax variant of the FunctionGraph. fns = jax_funcify(fgraph) logp_fn_jax = fns[0] # Now we build a Jax variant of the initial state/inputs to the model. 
rv_names = [rv.name for rv in model.free_RVs] init_state = [model.test_point[rv_name] for rv_name in rv_names] init_state_batched = jax.tree_map( lambda x: np.repeat(x[None, ...], chains, axis=0), init_state ) # Then we transform the Jaxified input and FunctionGraph to a BlackJax NUTS sampler potential = lambda x: -logp_fn_jax(*x) initial_position = init_state initial_state = nuts.new_state(initial_position, potential) # ## Sampling kernel_factory = lambda step_size, inverse_mass_matrix: nuts.kernel( potential, step_size, inverse_mass_matrix ) last_state, (step_size, inverse_mass_matrix), _ = stan_warmup.run( seed, kernel_factory, initial_state, 1000 ) def inference_loop(rng_key, kernel, initial_state, num_samples): def one_step(state, rng_key): state, info = kernel(rng_key, state) return state, (state, info) keys = jax.random.split(rng_key, num_samples) _, (states, infos) = jax.lax.scan(one_step, initial_state, keys) return states, infos # Build the kernel using the step size and inverse mass matrix returned from the window adaptation kernel = kernel_factory(step_size, inverse_mass_matrix) # Sample from the posterior distribution states, infos = inference_loop(seed, kernel, last_state, 50_000)
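# A quick, hedged sanity check on the BlackJAX output (a sketch, not part of the original notebook).
# It assumes `states` is the stacked HMC state returned by the inference loop, exposing a
# `position` pytree, and that `infos` carries an `is_divergent` field as in BlackJAX's NUTS info;
# field names may differ between BlackJAX versions. Note that the positions are on PyMC3's
# *transformed* (unconstrained) scale, e.g. the HalfCauchy tau is log-transformed, so this is only
# a rough check rather than a full posterior summary.
for name, samples in zip(rv_names, states.position):
    samples = np.asarray(samples)
    print(name, samples.shape, "mean:", samples.mean(axis=0))

print("divergent transitions:", int(np.sum(np.asarray(infos.is_divergent))))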
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/444/69444908.ipynb
null
null
[{"Id": 69444908, "ScriptId": 18961076, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2345063, "CreationDate": "07/31/2021 05:29:03", "VersionNumber": 5.0, "Title": "BlackJAX test", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 144.0, "LinesInsertedFromPrevious": 24.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 120.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
null
null
null
null
false
0
1,373
1
1,373
1,373
69444623
<jupyter_start><jupyter_text>Hitters ### Context This dataset is part of the R-package ISLR and is used in the related book by G. James et al. (2013) "An Introduction to Statistical Learning with applications in R" to demonstrate how Ridge regression and the LASSO are performed using R. ### Content This dataset was originally taken from the StatLib library which is maintained at Carnegie Mellon University. This is part of the data that was used in the 1988 ASA Graphics Section Poster Session. The salary data were originally from Sports Illustrated, April 20, 1987. The 1986 and career statistics were obtained from The 1987 Baseball Encyclopedia Update published by Collier Books, Macmillan Publishing Company, New York. Format A data frame with 322 observations of major league players on the following 20 variables. AtBat Number of times at bat in 1986 Hits Number of hits in 1986 HmRun Number of home runs in 1986 Runs Number of runs in 1986 RBI Number of runs batted in in 1986 Walks Number of walks in 1986 Years Number of years in the major leagues CAtBat Number of times at bat during his career CHits Number of hits during his career CHmRun Number of home runs during his career CRuns Number of runs during his career CRBI Number of runs batted in during his career CWalks Number of walks during his career League A factor with levels A and N indicating player’s league at the end of 1986 Division A factor with levels E and W indicating player’s division at the end of 1986 PutOuts Number of put outs in 1986 Assists Number of assists in 1986 Errors Number of errors in 1986 Salary 1987 annual salary on opening day in thousands of dollars NewLeague A factor with levels A and N indicating player’s league at the beginning of 1987 Kaggle dataset identifier: hitters <jupyter_code>import pandas as pd df = pd.read_csv('hitters/Hitters.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 322 entries, 0 to 321 Data columns (total 20 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 AtBat 322 non-null int64 1 Hits 322 non-null int64 2 HmRun 322 non-null int64 3 Runs 322 non-null int64 4 RBI 322 non-null int64 5 Walks 322 non-null int64 6 Years 322 non-null int64 7 CAtBat 322 non-null int64 8 CHits 322 non-null int64 9 CHmRun 322 non-null int64 10 CRuns 322 non-null int64 11 CRBI 322 non-null int64 12 CWalks 322 non-null int64 13 League 322 non-null object 14 Division 322 non-null object 15 PutOuts 322 non-null int64 16 Assists 322 non-null int64 17 Errors 322 non-null int64 18 Salary 263 non-null float64 19 NewLeague 322 non-null object dtypes: float64(1), int64(16), object(3) memory usage: 50.4+ KB <jupyter_text>Examples: { "AtBat": 293, "Hits": 66, "HmRun": 1, "Runs": 30, "RBI": 29, "Walks": 14, "Years": 1, "CAtBat": 293, "CHits": 66, "CHmRun": 1, "CRuns": 30, "CRBI": 29, "CWalks": 14, "League": "A", "Division": "E", "PutOuts": 446, "Assists": 33, "Errors": 20, "Salary": NaN, "NewLeague": "A" } { "AtBat": 315, "Hits": 81, "HmRun": 7, "Runs": 24, "RBI": 38, "Walks": 39, "Years": 14, "CAtBat": 3449, "CHits": 835, "CHmRun": 69, "CRuns": 321, "CRBI": 414, "CWalks": 375, "League": "N", "Division": "W", "PutOuts": 632, "Assists": 43, "Errors": 10, "Salary": 475.0, "NewLeague": "N" } { "AtBat": 479, "Hits": 130, "HmRun": 18, "Runs": 66, "RBI": 72, "Walks": 76, "Years": 3, "CAtBat": 1624, "CHits": 457, "CHmRun": 63, "CRuns": 224, "CRBI": 266, "CWalks": 263, "League": "A", "Division": "W", "PutOuts": 880, "Assists": 82, "Errors": 14, "Salary": 480.0, "NewLeague": 
"A" } { "AtBat": 496, "Hits": 141, "HmRun": 20, "Runs": 65, "RBI": 78, "Walks": 37, "Years": 11, "CAtBat": 5628, "CHits": 1575, "CHmRun": 225, "CRuns": 828, "CRBI": 838, "CWalks": 354, "League": "N", "Division": "E", "PutOuts": 200, "Assists": 11, "Errors": 3, "Salary": 500.0, "NewLeague": "N" } <jupyter_script># **Created by Berkay Alan** # **Non-Linear Models - Regression | Ensemble Learning - Bagging - Random Forests** # **31 July 2021** # **Content** # - Ensemble Learning - Bagged Trees(Bagging) (Theory - Model- Tuning) # - Ensemble Learning - Random Forests (Theory - Model- Tuning) # ** Check out My Github for other Regression Models ** # Github Repository Including: # # - K - Nearest Neighbors(KNN) (Theory - Model- Tuning) # - Support Vector Regression(SVR) (Theory - Model- Tuning) # - Non-Linear Support Vector Regression(SVR) (Theory - Model- Tuning) # - Regression(Decision) Trees (CART) (Theory - Model- Tuning) # - Gradient Boosting Machines(GBM) (Theory - Model- Tuning) # - Light Gradient Boosting Machines(LGBM) (Theory - Model- Tuning) # - XGBoost(Extreme Gradient Boosting) (Theory - Model- Tuning) # - Catboost (Theory - Model- Tuning) # # Check it out: https://github.com/berkayalan/Data-Science-Tutorials/blob/master/Non-Linear%20Models%20-%20Regression.ipynb # **For more Tutorial:** https://github.com/berkayalan # ## Resources # - **The Elements of Statistical Learning** - Trevor Hastie, Robert Tibshirani, Jerome Friedman - Data Mining, Inference, and Prediction (Springer Series in Statistics) # - [**Classification And Regression Trees for Machine Learning**](https://machinelearningmastery.com/classification-and-regression-trees-for-machine-learning/) # - [**Regression Trees by Statquest**](https://www.youtube.com/watch?v=g9c66TUylZ4&ab_channel=StatQuestwithJoshStarmer) # - [**Decision Tree Algorithm, Explained**](https://www.kdnuggets.com/2020/01/decision-tree-algorithm-explained.html) # - [**Ensemble methods: bagging, boosting and stacking**](https://towardsdatascience.com/ensemble-methods-bagging-boosting-and-stacking-c9214a10a205) # - [**Random Forests by Statquest**](https://www.youtube.com/watch?v=J4Wdy0Wc_xQ&ab_channel=StatQuestwithJoshStarmer) # - [**Why random forests outperform decision trees?**](https://towardsdatascience.com/why-random-forests-outperform-decision-trees-1b0f175a0b5) # ## Importing Libraries from warnings import filterwarnings filterwarnings("ignore") import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import statsmodels.api as sm import statsmodels.formula.api as smf from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import ( train_test_split, cross_val_score, cross_val_predict, ShuffleSplit, GridSearchCV, ) from sklearn.decomposition import PCA from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier from sklearn.preprocessing import scale from sklearn import model_selection from sklearn.neighbors import KNeighborsRegressor from sklearn.ensemble import ( BaggingRegressor, RandomForestRegressor, BaseEnsemble, GradientBoostingRegressor, ) import astor import time # ## Ensemble Learning - Bagged Trees (Bagging) # ### Theory # In order to understand Bagged trees, first we need to get familiar with **Ensemble Learning**. # Ensemble learning gives credence to the idea of the “wisdom of crowds,” which suggests that the decision-making of a larger group of people is typically better than that of an individual expert. 
Similarly, ensemble learning refers to a group (or ensemble) of base learners, or models, which work collectively to achieve a better final prediction. A single model, also known as a base or weak learner, may not perform well individually due to high variance or high bias. However, when weak learners are aggregated, they can form a strong learner, as their combination reduces bias or variance, yielding better model performance.
# Ensemble methods are frequently illustrated using decision trees, as this algorithm can be prone to overfitting (high variance and low bias) when it hasn't been pruned, and it can also lend itself to underfitting (low variance and high bias) when it's very small, like a decision stump, which is a decision tree with one level. Remember, when an algorithm overfits or underfits its training set, it cannot generalize well to new datasets, so ensemble methods are used to counteract this behavior and allow the model to generalize to new datasets.
# As an example of an ensemble method, we can train a group of Decision Tree classifiers, each on a different random subset of the training set. To make predictions, we obtain the predictions of all the individual trees, then predict the class that gets the most votes.
# ![Screen%20Shot%202021-07-24%20at%2015.43.45.png](attachment:Screen%20Shot%202021-07-24%20at%2015.43.45.png)
# Photo from the book: Hands-On Machine Learning with Scikit-Learn & TensorFlow
# Bootstrap aggregating (bagging) is an ensemble meta-algorithm designed to improve the stability and accuracy of machine learning algorithms used in statistical classification and regression. It also reduces variance and helps to avoid overfitting. Although it is usually applied to decision tree methods, it can be used with any type of method. Bagging is a special case of the model averaging approach.
# In bagging, a random sample of data in a training set is selected with replacement, meaning that the individual data points can be chosen more than once. After several data samples are generated, these weak models are then trained independently, and depending on the type of task (regression or classification, for example), the average or majority of those predictions yields a more accurate estimate. Bagging allows training instances to be sampled several times across multiple predictors.
# Once all predictors are trained, the ensemble can make a prediction for a new instance by simply aggregating the predictions of all predictors. The aggregation function is typically the statistical mode (the most frequent prediction, just like a hard voting classifier) for classification, or the average for regression. Each individual predictor has a higher bias than if it were trained on the original training set, but aggregation reduces both bias and variance.
# For this section, I highly recommend reading this well-explained article: [**Ensemble methods: bagging, boosting and stacking**](https://towardsdatascience.com/ensemble-methods-bagging-boosting-and-stacking-c9214a10a205)
# ### Model
# For a real-world example, we will work with the **Hitters** dataset.
# It can be downloaded here: https://www.kaggle.com/floser/hitters
hts = pd.read_csv("../input/hitters/Hitters.csv")
hts.head()

# Now we will remove NA values.
hts.dropna(inplace=True)

# We will apply **One Hot Encoding** to the categorical columns (first, a short bootstrap-sampling illustration related to the bagging discussion above is sketched just below).
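# Here is the bootstrap illustration promised above: a minimal, hedged sketch (arbitrary seed, not part of the original notebook) showing that sampling with replacement repeats some rows and leaves roughly a third of them out of each bootstrap sample.
rng = np.random.default_rng(42)  # illustrative seed
n_rows = len(hts)
bootstrap_idx = rng.choice(n_rows, size=n_rows, replace=True)  # one bootstrap sample of row indices
bootstrap_sample = hts.iloc[bootstrap_idx]  # some rows appear more than once
out_of_bag = np.setdiff1d(np.arange(n_rows), bootstrap_idx)  # rows never drawn ("out-of-bag")
print("unique rows in bootstrap sample:", len(np.unique(bootstrap_idx)))
print("out-of-bag rows:", len(out_of_bag), "of", n_rows)
# Back to the data preparation: one-hot encoding of the categorical columns.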
one_hot_encoded = pd.get_dummies(hts[["League", "Division", "NewLeague"]])
one_hot_encoded.head()

new_hts = hts.drop(["League", "Division", "NewLeague", "Salary"], axis=1).astype(
    "float64"
)
X = pd.concat(
    [new_hts, one_hot_encoded[["League_N", "Division_W", "NewLeague_N"]]], axis=1
)
X.head()

y = hts.Salary  # Target (dependent) variable

# Now we will split our dataset into train and test sets.
hts.shape

# Independent Variables
X.shape

# Dependent Variable
y.shape

X_train = X.iloc[:210]
X_test = X.iloc[210:]
y_train = y[:210]
y_test = y[210:]
print("X_train Shape: ", X_train.shape)
print("X_test Shape: ", X_test.shape)
print("y_train Shape: ", y_train.shape)
print("y_test Shape: ", y_test.shape)

bagging_model = BaggingRegressor(bootstrap_features=True).fit(X_train, y_train)

# Number of Trees
bagging_model.n_estimators

# 10 different trees
bagging_model.estimators_

# Samples in each tree
bagging_model.estimators_samples_[:1]

# Independent Variables in each tree
bagging_model.estimators_features_

# ### Prediction
bagging_model

y_pred = bagging_model.predict(X_train)

# Train Error
np.sqrt(mean_squared_error(y_train, y_pred))

r2_score(y_train, y_pred)

y_pred = bagging_model.predict(X_test)

# Test Error
np.sqrt(mean_squared_error(y_test, y_pred))

r2_score(y_test, y_pred)

# Let's check each tree independently.
second_tree = bagging_model.estimators_[1].fit(X_train, y_train).predict(X_test)

# Test Error for the second tree
np.sqrt(mean_squared_error(y_test, second_tree))

fourth_tree = bagging_model.estimators_[3].fit(X_train, y_train).predict(X_test)

# Test Error for the fourth tree
np.sqrt(mean_squared_error(y_test, fourth_tree))

# ### Model Tuning
bagging_model

bagging_params = {"n_estimators": range(1, 30)}
bagging_cv_model = GridSearchCV(bagging_model, bagging_params, cv=15).fit(
    X_train, y_train
)
bagging_cv_model.best_params_

tuned_bagging_model = BaggingRegressor(
    n_estimators=bagging_cv_model.best_params_["n_estimators"]
).fit(X_train, y_train)
tuned_bagging_model

y_pred = tuned_bagging_model.predict(X_train)

# Train Error
np.sqrt(mean_squared_error(y_train, y_pred))

r2_score(y_train, y_pred)

y_pred = tuned_bagging_model.predict(X_test)

# Test Error
np.sqrt(mean_squared_error(y_test, y_pred))

r2_score(y_test, y_pred)

# ## Ensemble Learning - Random Forests
# ### Theory
# Random Forest is also an example of ensemble learning, in which we combine multiple machine learning algorithms to obtain better predictive performance.
# The random forest algorithm is an extension of the bagging method as it utilizes both bagging and feature randomness to create an uncorrelated forest of decision trees. Feature randomness, also known as feature bagging or "the random subspace method", generates a random subset of features, which ensures low correlation among decision trees. This is a key difference between decision trees and random forests. While decision trees consider all the possible feature splits, random forests only select a subset of those features.
# Let's try to understand this with an example. Say I want to watch a movie today and I am not sure what to watch. After calling one of my best friends, she recommends a movie to me based on my old preferences, which she knows. At this point, my old preferences are the training set for her. That is a classical decision tree. But if I were to get recommendations from 20 different friends and pick the most-voted movie, that would be a **Random Forest**.
# Random forest algorithms have three main hyperparameters, which need to be set before training.
These include node size, the number of trees, and the number of features sampled. From there, the random forest classifier can be used to solve for regression or classification problems. # ![image-2.png](attachment:image-2.png) # Photo is cited by: https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.analyticsvidhya.com%2Fblog%2F2020%2F05%2Fdecision-tree-vs-random-forest-algorithm%2F&psig=AOvVaw2jevf2JFgvEKCBieh5yaHX&ust=1627289101408000&source=images&cd=vfe&ved=0CAsQjRxqFwoTCICY4bvq_fECFQAAAAAdAAAAABAD # Yet another great quality of Random Forests is that they make it easy to measure the relative importance of each feature. Scikit-Learn measures a feature’s importance by looking at how much the tree nodes that use that feature reduce impurity on average (across all trees in the forest). More precisely, it is a weighted average, where each node’s weight is equal to the number of training samples that are associated with it. # Another advantage of sampling over the features is that it makes the decision making process more robust to missing data: observations (from the training dataset or not) with missing data can still be regressed or classified based on the trees that take into account only features where data are not missing. Thus, random forest algorithm combines the concepts of bagging and random feature subspace selection to create more robust models. # ### Model # For a real world example, we will work with **Hitters** dataset. # It can be downloaded here: https://www.kaggle.com/floser/hitters hts = pd.read_csv("../input/hitters/Hitters.csv") hts.head() # Now we will remove NA values. hts.dropna(inplace=True) # We will do **One Hot Encoding** to categorical columns. one_hot_encoded = pd.get_dummies(hts[["League", "Division", "NewLeague"]]) one_hot_encoded.head() new_hts = hts.drop(["League", "Division", "NewLeague", "Salary"], axis=1).astype( "float64" ) X = pd.concat( [new_hts, one_hot_encoded[["League_N", "Division_W", "NewLeague_N"]]], axis=1 ) X.head() y = hts.Salary # Target-dependent variable # Now we will split our dataset as train and test set. hts.shape # Independent Variables X.shape # Dependent Variables y.shape X_train = X.iloc[:210] X_test = X.iloc[210:] y_train = y[:210] y_test = y[210:] print("X_train Shape: ", X_train.shape) print("X_test Shape: ", X_test.shape) print("y_train Shape: ", y_train.shape) print("y_test Shape: ", y_test.shape) random_forests = RandomForestRegressor(random_state=60).fit(X_train, y_train) random_forests random_forests.max_features # Number of Trees random_forests.n_estimators random_forests.min_samples_leaf random_forests.min_samples_split # Let's look at the importances of features. 
random_forests.feature_importances_ std = np.std([tree.feature_importances_ for tree in random_forests.estimators_], axis=0) fig, ax = plt.subplots() pd.Series(random_forests.feature_importances_, index=[X_train.columns]).plot.bar( yerr=std, ax=ax ) ax.set_title("Feature importances using MDI") ax.set_ylabel("Feature Imporances") fig.tight_layout() # ### Prediction random_forests y_pred = random_forests.predict(X_train) # Train Error np.sqrt(mean_squared_error(y_train, y_pred)) r2_score(y_train, y_pred) y_pred = random_forests.predict(X_test) # Test Error np.sqrt(mean_squared_error(y_test, y_pred)) r2_score(y_test, y_pred) # ### Model Tuning random_forests random_forests_params = { "max_depth": list(range(1, 20)), "max_features": [2, 5, 8, 11, 16], "n_estimators": [300, 500, 1000, 1700], } random_forests = RandomForestRegressor(random_state=60) cv_random_forests = GridSearchCV(random_forests, random_forests_params, cv=7, n_jobs=-1) # It takes nearly 10 minutes start_time = time.time() cv_random_forests.fit(X_train, y_train) elapsed_time = time.time() - start_time print(f"Elapsed time for cross validation: " f"{elapsed_time:.3f} seconds") cv_random_forests.best_params_ random_forests_tuned = RandomForestRegressor( max_depth=cv_random_forests.best_params_["max_depth"], max_features=cv_random_forests.best_params_["max_features"], n_estimators=cv_random_forests.best_params_["n_estimators"], ).fit(X_train, y_train) random_forests_tuned y_pred = random_forests_tuned.predict(X_train) # Train Error np.sqrt(mean_squared_error(y_train, y_pred)) r2_score(y_train, y_pred) y_pred = random_forests_tuned.predict(X_test) # Test Error np.sqrt(mean_squared_error(y_test, y_pred)) r2_score(y_test, y_pred) # Let's look at the importances of features. Importances = pd.DataFrame( {"Importance": random_forests_tuned.feature_importances_ * 100}, index=X_train.columns, ) Importances Importances.sort_values(by="Importance", axis=0, ascending=True).plot( kind="barh", color="b" ) plt.xlabel("Feature Importances") plt.ylabel("Features") plt.title("Feature Importances")
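# Illustrative addition (not part of the original notebook): the bars above use scikit-learn's impurity-based feature_importances_ (the MDI measure described in the theory section). Permutation importance, computed on held-out data, is a common alternative; the sketch below assumes the random_forests_tuned model and the X_test / y_test split defined above.
from sklearn.inspection import permutation_importance

perm = permutation_importance(
    random_forests_tuned, X_test, y_test, n_repeats=10, random_state=60, n_jobs=-1
)
perm_importances = pd.Series(perm.importances_mean, index=X_test.columns)
perm_importances.sort_values().plot(kind="barh", color="b")
plt.xlabel("Mean decrease in score after permutation")
plt.title("Permutation Feature Importances")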
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/444/69444623.ipynb
hitters
floser
[{"Id": 69444623, "ScriptId": 18962116, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3264446, "CreationDate": "07/31/2021 05:23:59", "VersionNumber": 1.0, "Title": "Non-Linear Models - Regression | Ensemble Learning", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 390.0, "LinesInsertedFromPrevious": 390.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92621026, "KernelVersionId": 69444623, "SourceDatasetVersionId": 17368}]
[{"Id": 17368, "DatasetId": 12722, "DatasourceVersionId": 17368, "CreatorUserId": 1627126, "LicenseName": "Other (specified in description)", "CreationDate": "02/11/2018 20:43:51", "VersionNumber": 1.0, "Title": "Hitters", "Slug": "hitters", "Subtitle": "Major League Baseball Data from the 1986 and 1987 seasons.", "Description": "### Context\n\nThis dataset is part of the R-package ISLR and is used in the related book by G. James et al. (2013) \"An Introduction to Statistical Learning with applications in R\" to demonstrate how Ridge regression and the LASSO are performed using R.\n\n\n\n### Content\n\nThis dataset was originally taken from the StatLib library which is maintained at Carnegie Mellon University. This is part of the data that was used in the 1988 ASA Graphics Section Poster Session. The salary data were originally from Sports Illustrated, April 20, 1987. The 1986 and career statistics were obtained from The 1987 Baseball Encyclopedia Update published by Collier Books, Macmillan Publishing Company, New York.\n\nFormat\nA data frame with 322 observations of major league players on the following 20 variables.\nAtBat Number of times at bat in 1986\nHits Number of hits in 1986\nHmRun Number of home runs in 1986\nRuns Number of runs in 1986\nRBI Number of runs batted in in 1986\nWalks Number of walks in 1986\nYears Number of years in the major leagues\nCAtBat Number of times at bat during his career\nCHits Number of hits during his career\nCHmRun Number of home runs during his career\nCRuns Number of runs during his career\nCRBI Number of runs batted in during his career\nCWalks Number of walks during his career\nLeague A factor with levels A and N indicating player\u2019s league at the end of 1986\nDivision A factor with levels E and W indicating player\u2019s division at the end of 1986\nPutOuts Number of put outs in 1986\nAssists Number of assists in 1986\nErrors Number of errors in 1986\nSalary 1987 annual salary on opening day in thousands of dollars\nNewLeague A factor with levels A and N indicating player\u2019s league at the beginning of 1987\n\n\n\n### Acknowledgements\n\nPlease cite/acknowledge: Games, G., Witten, D., Hastie, T., and Tibshirani, R. (2013) An Introduction to Statistical Learning with applications in R, www.StatLearning.com, Springer-Verlag, New York.\n\n\n\n### Inspiration\n\nThis upload shall enable actuarial kernels with R and Python", "VersionNotes": "Initial release", "TotalCompressedBytes": 20906.0, "TotalUncompressedBytes": 20906.0}]
[{"Id": 12722, "CreatorUserId": 1627126, "OwnerUserId": 1627126.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 17368.0, "CurrentDatasourceVersionId": 17368.0, "ForumId": 20189, "Type": 2, "CreationDate": "02/11/2018 20:43:51", "LastActivityDate": "02/11/2018", "TotalViews": 29105, "TotalDownloads": 3828, "TotalVotes": 18, "TotalKernels": 82}]
[{"Id": 1627126, "UserName": "floser", "DisplayName": "floser", "RegisterDate": "02/10/2018", "PerformanceTier": 1}]
# **Created by Berkay Alan** # **Non-Linear Models - Regression | Ensemble Learning - Bagging - Random Forests** # **31 July 2021** # **Content** # - Ensemble Learning - Bagged Trees(Bagging) (Theory - Model- Tuning) # - Ensemble Learning - Random Forests (Theory - Model- Tuning) # ** Check out My Github for other Regression Models ** # Github Repository Including: # # - K - Nearest Neighbors(KNN) (Theory - Model- Tuning) # - Support Vector Regression(SVR) (Theory - Model- Tuning) # - Non-Linear Support Vector Regression(SVR) (Theory - Model- Tuning) # - Regression(Decision) Trees (CART) (Theory - Model- Tuning) # - Gradient Boosting Machines(GBM) (Theory - Model- Tuning) # - Light Gradient Boosting Machines(LGBM) (Theory - Model- Tuning) # - XGBoost(Extreme Gradient Boosting) (Theory - Model- Tuning) # - Catboost (Theory - Model- Tuning) # # Check it out: https://github.com/berkayalan/Data-Science-Tutorials/blob/master/Non-Linear%20Models%20-%20Regression.ipynb # **For more Tutorial:** https://github.com/berkayalan # ## Resources # - **The Elements of Statistical Learning** - Trevor Hastie, Robert Tibshirani, Jerome Friedman - Data Mining, Inference, and Prediction (Springer Series in Statistics) # - [**Classification And Regression Trees for Machine Learning**](https://machinelearningmastery.com/classification-and-regression-trees-for-machine-learning/) # - [**Regression Trees by Statquest**](https://www.youtube.com/watch?v=g9c66TUylZ4&ab_channel=StatQuestwithJoshStarmer) # - [**Decision Tree Algorithm, Explained**](https://www.kdnuggets.com/2020/01/decision-tree-algorithm-explained.html) # - [**Ensemble methods: bagging, boosting and stacking**](https://towardsdatascience.com/ensemble-methods-bagging-boosting-and-stacking-c9214a10a205) # - [**Random Forests by Statquest**](https://www.youtube.com/watch?v=J4Wdy0Wc_xQ&ab_channel=StatQuestwithJoshStarmer) # - [**Why random forests outperform decision trees?**](https://towardsdatascience.com/why-random-forests-outperform-decision-trees-1b0f175a0b5) # ## Importing Libraries from warnings import filterwarnings filterwarnings("ignore") import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import statsmodels.api as sm import statsmodels.formula.api as smf from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import ( train_test_split, cross_val_score, cross_val_predict, ShuffleSplit, GridSearchCV, ) from sklearn.decomposition import PCA from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier from sklearn.preprocessing import scale from sklearn import model_selection from sklearn.neighbors import KNeighborsRegressor from sklearn.ensemble import ( BaggingRegressor, RandomForestRegressor, BaseEnsemble, GradientBoostingRegressor, ) import astor import time # ## Ensemble Learning - Bagged Trees (Bagging) # ### Theory # In order to understand Bagged trees, first we need to get familiar with **Ensemble Learning**. # Ensemble learning gives credence to the idea of the “wisdom of crowds,” which suggests that the decision-making of a larger group of people is typically better than that of an individual expert. Similarly, ensemble learning refers to a group (or ensemble) of base learners, or models, which work collectively to achieve a better final prediction. A single model, also known as a base or weak learner, may not perform well individually due to high variance or high bias. 
However, when weak learners are aggregated, they can form a strong learner, as their combination reduces bias or variance, yielding better model performance. # Ensemble methods are frequently illustrated using decision trees as this algorithm can be prone to overfitting (high variance and low bias) when it hasn’t been pruned and it can also lend itself to underfitting (low variance and high bias) when it’s very small, like a decision stump, which is a decision tree with one level. Remember, when an algorithm overfits or underfits to its training set, it cannot generalize well to new datasets, so ensemble methods are used to counteract this behavior to allow for generalization of the model to new datasets. # As an example of an Ensemble method, we can train a group of Decision Tree classifiers, each on a different random subset of the training set. To make predictions, we obtain the predictions of all the individual trees, then predict the class that gets the most votes. # ![Screen%20Shot%202021-07-24%20at%2015.43.45.png](attachment:Screen%20Shot%202021-07-24%20at%2015.43.45.png) # Photo is cited by this book: Hands-On Machine Learning with Scikit-Learn & TensorFlow # Bootstrap aggregating(bagging), is a ensemble meta-algorithm designed to improve the stability and accuracy of machine learning algorithms used in statistical classification and regression. It also reduces variance and helps to avoid overfitting. Although it is usually applied to decision tree methods, it can be used with any type of method. Bagging is a special case of the model averaging approach. # In bagging, a random sample of data in a training set is selected with replacement—meaning that the individual data points can be chosen more than once. After several data samples are generated, these weak models are then trained independently, and depending on the type of task—regression or classification, for example—the average or majority of those predictions yield a more accurate estimate. Bagging allows training instances to be sampled several times across multiple predictors. # Once all predictors are trained, the ensemble can make a prediction for a new instance by simply aggregating the predictions of all predictors. The aggregation function is typically the statistical mode (the most frequent prediction, just like a hard voting classifier) for classification, or the average for regression. Each individual predictor has a higher bias than if it were trained on the original training set, but aggregation reduces both bias and variance. # For this section, I highly recommend you to read this well explained article: [**Ensemble methods: bagging, boosting and stacking**](https://towardsdatascience.com/ensemble-methods-bagging-boosting-and-stacking-c9214a10a205) # ### Model # For a real world example, we will work with **Hitters** dataset. # It can be downloaded here: https://www.kaggle.com/floser/hitters hts = pd.read_csv("../input/hitters/Hitters.csv") hts.head() # Now we will remove NA values. hts.dropna(inplace=True) # We will do **One Hot Encoding** to categorical columns. one_hot_encoded = pd.get_dummies(hts[["League", "Division", "NewLeague"]]) one_hot_encoded.head() new_hts = hts.drop(["League", "Division", "NewLeague", "Salary"], axis=1).astype( "float64" ) X = pd.concat( [new_hts, one_hot_encoded[["League_N", "Division_W", "NewLeague_N"]]], axis=1 ) X.head() y = hts.Salary # Target-dependent variable # Now we will split our dataset as train and test set. 
hts.shape # Independent Variables X.shape # Dependent Variables y.shape X_train = X.iloc[:210] X_test = X.iloc[210:] y_train = y[:210] y_test = y[210:] print("X_train Shape: ", X_train.shape) print("X_test Shape: ", X_test.shape) print("y_train Shape: ", y_train.shape) print("y_test Shape: ", y_test.shape) bagging_model = BaggingRegressor(bootstrap_features=True).fit(X_train, y_train) # Number of Trees bagging_model.n_estimators # 10 different tree bagging_model.estimators_ # Samples in each tree bagging_model.estimators_samples_[:1] # Independent Variables in each tree bagging_model.estimators_features_ # ### Prediction bagging_model y_pred = bagging_model.predict(X_train) # Train Error np.sqrt(mean_squared_error(y_train, y_pred)) r2_score(y_train, y_pred) y_pred = bagging_model.predict(X_test) # Test Error np.sqrt(mean_squared_error(y_test, y_pred)) r2_score(y_test, y_pred) # Let's check each tree independently. second_tree = bagging_model.estimators_[1].fit(X_train, y_train).predict(X_test) # Test Error for second tree np.sqrt(mean_squared_error(y_test, second_tree)) fourth_tree = bagging_model.estimators_[3].fit(X_train, y_train).predict(X_test) # Test Error for fourth tree np.sqrt(mean_squared_error(y_test, fourth_tree)) # ### Model Tuning bagging_model bagging_params = {"n_estimators": range(1, 30)} bagging_cv_model = GridSearchCV(bagging_model, bagging_params, cv=15).fit( X_train, y_train ) bagging_cv_model.best_params_ tuned_bagging_model = BaggingRegressor( n_estimators=bagging_cv_model.best_params_["n_estimators"] ).fit(X_train, y_train) tuned_bagging_model y_pred = tuned_bagging_model.predict(X_train) # Train Error np.sqrt(mean_squared_error(y_train, y_pred)) r2_score(y_train, y_pred) y_pred = tuned_bagging_model.predict(X_test) # Test Error np.sqrt(mean_squared_error(y_test, y_pred)) r2_score(y_test, y_pred) # ## Ensemble Learning - Random Forests # ### Theory # Random Forest is also an example of ensemble learning, in which we combine multiple machine learning algorithms to obtain better predictive performance. # The random forest algorithm is an extension of the bagging method as it utilizes both bagging and feature randomness to create an uncorrelated forest of decision trees. Feature randomness, also known as feature bagging or “the random subspace method”, generates a random subset of features, which ensures low correlation among decision trees. This is a key difference between decision trees and random forests. While decision trees consider all the possible feature splits, random forests only select a subset of those features. # Let's try to understand with an example. For example, I want to watch a movie today and I am not sure what to watch. After calling one of my best friends, she recommend a movie to me according to my old preferences that she know. At this point, my old preferences are training set for her. It's a classical decision tree. But if I would get recommendations from my 20 different friends and select most voted movie, that would be **Random Forests**. # Random forest algorithms have three main hyperparameters, which need to be set before training. These include node size, the number of trees, and the number of features sampled. From there, the random forest classifier can be used to solve for regression or classification problems. 
# ![image-2.png](attachment:image-2.png) # Photo is cited by: https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.analyticsvidhya.com%2Fblog%2F2020%2F05%2Fdecision-tree-vs-random-forest-algorithm%2F&psig=AOvVaw2jevf2JFgvEKCBieh5yaHX&ust=1627289101408000&source=images&cd=vfe&ved=0CAsQjRxqFwoTCICY4bvq_fECFQAAAAAdAAAAABAD # Yet another great quality of Random Forests is that they make it easy to measure the relative importance of each feature. Scikit-Learn measures a feature’s importance by looking at how much the tree nodes that use that feature reduce impurity on average (across all trees in the forest). More precisely, it is a weighted average, where each node’s weight is equal to the number of training samples that are associated with it. # Another advantage of sampling over the features is that it makes the decision making process more robust to missing data: observations (from the training dataset or not) with missing data can still be regressed or classified based on the trees that take into account only features where data are not missing. Thus, random forest algorithm combines the concepts of bagging and random feature subspace selection to create more robust models. # ### Model # For a real world example, we will work with **Hitters** dataset. # It can be downloaded here: https://www.kaggle.com/floser/hitters hts = pd.read_csv("../input/hitters/Hitters.csv") hts.head() # Now we will remove NA values. hts.dropna(inplace=True) # We will do **One Hot Encoding** to categorical columns. one_hot_encoded = pd.get_dummies(hts[["League", "Division", "NewLeague"]]) one_hot_encoded.head() new_hts = hts.drop(["League", "Division", "NewLeague", "Salary"], axis=1).astype( "float64" ) X = pd.concat( [new_hts, one_hot_encoded[["League_N", "Division_W", "NewLeague_N"]]], axis=1 ) X.head() y = hts.Salary # Target-dependent variable # Now we will split our dataset as train and test set. hts.shape # Independent Variables X.shape # Dependent Variables y.shape X_train = X.iloc[:210] X_test = X.iloc[210:] y_train = y[:210] y_test = y[210:] print("X_train Shape: ", X_train.shape) print("X_test Shape: ", X_test.shape) print("y_train Shape: ", y_train.shape) print("y_test Shape: ", y_test.shape) random_forests = RandomForestRegressor(random_state=60).fit(X_train, y_train) random_forests random_forests.max_features # Number of Trees random_forests.n_estimators random_forests.min_samples_leaf random_forests.min_samples_split # Let's look at the importances of features. 
random_forests.feature_importances_ std = np.std([tree.feature_importances_ for tree in random_forests.estimators_], axis=0) fig, ax = plt.subplots() pd.Series(random_forests.feature_importances_, index=[X_train.columns]).plot.bar( yerr=std, ax=ax ) ax.set_title("Feature importances using MDI") ax.set_ylabel("Feature Imporances") fig.tight_layout() # ### Prediction random_forests y_pred = random_forests.predict(X_train) # Train Error np.sqrt(mean_squared_error(y_train, y_pred)) r2_score(y_train, y_pred) y_pred = random_forests.predict(X_test) # Test Error np.sqrt(mean_squared_error(y_test, y_pred)) r2_score(y_test, y_pred) # ### Model Tuning random_forests random_forests_params = { "max_depth": list(range(1, 20)), "max_features": [2, 5, 8, 11, 16], "n_estimators": [300, 500, 1000, 1700], } random_forests = RandomForestRegressor(random_state=60) cv_random_forests = GridSearchCV(random_forests, random_forests_params, cv=7, n_jobs=-1) # It takes nearly 10 minutes start_time = time.time() cv_random_forests.fit(X_train, y_train) elapsed_time = time.time() - start_time print(f"Elapsed time for cross validation: " f"{elapsed_time:.3f} seconds") cv_random_forests.best_params_ random_forests_tuned = RandomForestRegressor( max_depth=cv_random_forests.best_params_["max_depth"], max_features=cv_random_forests.best_params_["max_features"], n_estimators=cv_random_forests.best_params_["n_estimators"], ).fit(X_train, y_train) random_forests_tuned y_pred = random_forests_tuned.predict(X_train) # Train Error np.sqrt(mean_squared_error(y_train, y_pred)) r2_score(y_train, y_pred) y_pred = random_forests_tuned.predict(X_test) # Test Error np.sqrt(mean_squared_error(y_test, y_pred)) r2_score(y_test, y_pred) # Let's look at the importances of features. Importances = pd.DataFrame( {"Importance": random_forests_tuned.feature_importances_ * 100}, index=X_train.columns, ) Importances Importances.sort_values(by="Importance", axis=0, ascending=True).plot( kind="barh", color="b" ) plt.xlabel("Feature Importances") plt.ylabel("Features") plt.title("Feature Importances")
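# Toy illustration (added sketch, numbers are made up and are not outputs of the models above): the aggregation rule described in the bagging section, statistical mode (majority vote) for classification and the average for regression.
import numpy as np

# classification: each row is one sample, each column is one base classifier's predicted class
votes = np.array([[0, 1, 1],
                  [1, 1, 0],
                  [0, 0, 1]])
hard_vote = (votes.sum(axis=1) > votes.shape[1] / 2).astype(int)  # majority class -> [1, 1, 0]

# regression: each row is one base regressor's predictions for the same three samples
preds = np.array([[410.0, 505.0, 760.0],
                  [430.0, 495.0, 740.0],
                  [450.0, 520.0, 750.0]])
bagged_estimate = preds.mean(axis=0)  # simple average -> [430.0, 506.67, 750.0]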
[{"hitters/Hitters.csv": {"column_names": "[\"AtBat\", \"Hits\", \"HmRun\", \"Runs\", \"RBI\", \"Walks\", \"Years\", \"CAtBat\", \"CHits\", \"CHmRun\", \"CRuns\", \"CRBI\", \"CWalks\", \"League\", \"Division\", \"PutOuts\", \"Assists\", \"Errors\", \"Salary\", \"NewLeague\"]", "column_data_types": "{\"AtBat\": \"int64\", \"Hits\": \"int64\", \"HmRun\": \"int64\", \"Runs\": \"int64\", \"RBI\": \"int64\", \"Walks\": \"int64\", \"Years\": \"int64\", \"CAtBat\": \"int64\", \"CHits\": \"int64\", \"CHmRun\": \"int64\", \"CRuns\": \"int64\", \"CRBI\": \"int64\", \"CWalks\": \"int64\", \"League\": \"object\", \"Division\": \"object\", \"PutOuts\": \"int64\", \"Assists\": \"int64\", \"Errors\": \"int64\", \"Salary\": \"float64\", \"NewLeague\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 322 entries, 0 to 321\nData columns (total 20 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 AtBat 322 non-null int64 \n 1 Hits 322 non-null int64 \n 2 HmRun 322 non-null int64 \n 3 Runs 322 non-null int64 \n 4 RBI 322 non-null int64 \n 5 Walks 322 non-null int64 \n 6 Years 322 non-null int64 \n 7 CAtBat 322 non-null int64 \n 8 CHits 322 non-null int64 \n 9 CHmRun 322 non-null int64 \n 10 CRuns 322 non-null int64 \n 11 CRBI 322 non-null int64 \n 12 CWalks 322 non-null int64 \n 13 League 322 non-null object \n 14 Division 322 non-null object \n 15 PutOuts 322 non-null int64 \n 16 Assists 322 non-null int64 \n 17 Errors 322 non-null int64 \n 18 Salary 263 non-null float64\n 19 NewLeague 322 non-null object \ndtypes: float64(1), int64(16), object(3)\nmemory usage: 50.4+ KB\n", "summary": "{\"AtBat\": {\"count\": 322.0, \"mean\": 380.92857142857144, \"std\": 153.40498147064488, \"min\": 16.0, \"25%\": 255.25, \"50%\": 379.5, \"75%\": 512.0, \"max\": 687.0}, \"Hits\": {\"count\": 322.0, \"mean\": 101.0248447204969, \"std\": 46.454741356766796, \"min\": 1.0, \"25%\": 64.0, \"50%\": 96.0, \"75%\": 137.0, \"max\": 238.0}, \"HmRun\": {\"count\": 322.0, \"mean\": 10.770186335403727, \"std\": 8.709037413827737, \"min\": 0.0, \"25%\": 4.0, \"50%\": 8.0, \"75%\": 16.0, \"max\": 40.0}, \"Runs\": {\"count\": 322.0, \"mean\": 50.909937888198755, \"std\": 26.02409548457972, \"min\": 0.0, \"25%\": 30.25, \"50%\": 48.0, \"75%\": 69.0, \"max\": 130.0}, \"RBI\": {\"count\": 322.0, \"mean\": 48.02795031055901, \"std\": 26.166894761424544, \"min\": 0.0, \"25%\": 28.0, \"50%\": 44.0, \"75%\": 64.75, \"max\": 121.0}, \"Walks\": {\"count\": 322.0, \"mean\": 38.74223602484472, \"std\": 21.63932655032488, \"min\": 0.0, \"25%\": 22.0, \"50%\": 35.0, \"75%\": 53.0, \"max\": 105.0}, \"Years\": {\"count\": 322.0, \"mean\": 7.444099378881988, \"std\": 4.926087269904596, \"min\": 1.0, \"25%\": 4.0, \"50%\": 6.0, \"75%\": 11.0, \"max\": 24.0}, \"CAtBat\": {\"count\": 322.0, \"mean\": 2648.6832298136646, \"std\": 2324.205870266538, \"min\": 19.0, \"25%\": 816.75, \"50%\": 1928.0, \"75%\": 3924.25, \"max\": 14053.0}, \"CHits\": {\"count\": 322.0, \"mean\": 717.5714285714286, \"std\": 654.4726274762833, \"min\": 4.0, \"25%\": 209.0, \"50%\": 508.0, \"75%\": 1059.25, \"max\": 4256.0}, \"CHmRun\": {\"count\": 322.0, \"mean\": 69.49068322981367, \"std\": 86.26606080180498, \"min\": 0.0, \"25%\": 14.0, \"50%\": 37.5, \"75%\": 90.0, \"max\": 548.0}, \"CRuns\": {\"count\": 322.0, \"mean\": 358.7950310559006, \"std\": 334.10588576614686, \"min\": 1.0, \"25%\": 100.25, \"50%\": 247.0, \"75%\": 526.25, \"max\": 2165.0}, \"CRBI\": {\"count\": 322.0, \"mean\": 330.11801242236027, \"std\": 
333.2196169682779, \"min\": 0.0, \"25%\": 88.75, \"50%\": 220.5, \"75%\": 426.25, \"max\": 1659.0}, \"CWalks\": {\"count\": 322.0, \"mean\": 260.2391304347826, \"std\": 267.05808454363216, \"min\": 0.0, \"25%\": 67.25, \"50%\": 170.5, \"75%\": 339.25, \"max\": 1566.0}, \"PutOuts\": {\"count\": 322.0, \"mean\": 288.9378881987578, \"std\": 280.70461385993525, \"min\": 0.0, \"25%\": 109.25, \"50%\": 212.0, \"75%\": 325.0, \"max\": 1378.0}, \"Assists\": {\"count\": 322.0, \"mean\": 106.91304347826087, \"std\": 136.85487646596755, \"min\": 0.0, \"25%\": 7.0, \"50%\": 39.5, \"75%\": 166.0, \"max\": 492.0}, \"Errors\": {\"count\": 322.0, \"mean\": 8.040372670807454, \"std\": 6.368359079737258, \"min\": 0.0, \"25%\": 3.0, \"50%\": 6.0, \"75%\": 11.0, \"max\": 32.0}, \"Salary\": {\"count\": 263.0, \"mean\": 535.9258821292775, \"std\": 451.11868070253865, \"min\": 67.5, \"25%\": 190.0, \"50%\": 425.0, \"75%\": 750.0, \"max\": 2460.0}}", "examples": "{\"AtBat\":{\"0\":293,\"1\":315,\"2\":479,\"3\":496},\"Hits\":{\"0\":66,\"1\":81,\"2\":130,\"3\":141},\"HmRun\":{\"0\":1,\"1\":7,\"2\":18,\"3\":20},\"Runs\":{\"0\":30,\"1\":24,\"2\":66,\"3\":65},\"RBI\":{\"0\":29,\"1\":38,\"2\":72,\"3\":78},\"Walks\":{\"0\":14,\"1\":39,\"2\":76,\"3\":37},\"Years\":{\"0\":1,\"1\":14,\"2\":3,\"3\":11},\"CAtBat\":{\"0\":293,\"1\":3449,\"2\":1624,\"3\":5628},\"CHits\":{\"0\":66,\"1\":835,\"2\":457,\"3\":1575},\"CHmRun\":{\"0\":1,\"1\":69,\"2\":63,\"3\":225},\"CRuns\":{\"0\":30,\"1\":321,\"2\":224,\"3\":828},\"CRBI\":{\"0\":29,\"1\":414,\"2\":266,\"3\":838},\"CWalks\":{\"0\":14,\"1\":375,\"2\":263,\"3\":354},\"League\":{\"0\":\"A\",\"1\":\"N\",\"2\":\"A\",\"3\":\"N\"},\"Division\":{\"0\":\"E\",\"1\":\"W\",\"2\":\"W\",\"3\":\"E\"},\"PutOuts\":{\"0\":446,\"1\":632,\"2\":880,\"3\":200},\"Assists\":{\"0\":33,\"1\":43,\"2\":82,\"3\":11},\"Errors\":{\"0\":20,\"1\":10,\"2\":14,\"3\":3},\"Salary\":{\"0\":null,\"1\":475.0,\"2\":480.0,\"3\":500.0},\"NewLeague\":{\"0\":\"A\",\"1\":\"N\",\"2\":\"A\",\"3\":\"N\"}}"}}]
true
1
<start_data_description><data_path>hitters/Hitters.csv: <column_names> ['AtBat', 'Hits', 'HmRun', 'Runs', 'RBI', 'Walks', 'Years', 'CAtBat', 'CHits', 'CHmRun', 'CRuns', 'CRBI', 'CWalks', 'League', 'Division', 'PutOuts', 'Assists', 'Errors', 'Salary', 'NewLeague'] <column_types> {'AtBat': 'int64', 'Hits': 'int64', 'HmRun': 'int64', 'Runs': 'int64', 'RBI': 'int64', 'Walks': 'int64', 'Years': 'int64', 'CAtBat': 'int64', 'CHits': 'int64', 'CHmRun': 'int64', 'CRuns': 'int64', 'CRBI': 'int64', 'CWalks': 'int64', 'League': 'object', 'Division': 'object', 'PutOuts': 'int64', 'Assists': 'int64', 'Errors': 'int64', 'Salary': 'float64', 'NewLeague': 'object'} <dataframe_Summary> {'AtBat': {'count': 322.0, 'mean': 380.92857142857144, 'std': 153.40498147064488, 'min': 16.0, '25%': 255.25, '50%': 379.5, '75%': 512.0, 'max': 687.0}, 'Hits': {'count': 322.0, 'mean': 101.0248447204969, 'std': 46.454741356766796, 'min': 1.0, '25%': 64.0, '50%': 96.0, '75%': 137.0, 'max': 238.0}, 'HmRun': {'count': 322.0, 'mean': 10.770186335403727, 'std': 8.709037413827737, 'min': 0.0, '25%': 4.0, '50%': 8.0, '75%': 16.0, 'max': 40.0}, 'Runs': {'count': 322.0, 'mean': 50.909937888198755, 'std': 26.02409548457972, 'min': 0.0, '25%': 30.25, '50%': 48.0, '75%': 69.0, 'max': 130.0}, 'RBI': {'count': 322.0, 'mean': 48.02795031055901, 'std': 26.166894761424544, 'min': 0.0, '25%': 28.0, '50%': 44.0, '75%': 64.75, 'max': 121.0}, 'Walks': {'count': 322.0, 'mean': 38.74223602484472, 'std': 21.63932655032488, 'min': 0.0, '25%': 22.0, '50%': 35.0, '75%': 53.0, 'max': 105.0}, 'Years': {'count': 322.0, 'mean': 7.444099378881988, 'std': 4.926087269904596, 'min': 1.0, '25%': 4.0, '50%': 6.0, '75%': 11.0, 'max': 24.0}, 'CAtBat': {'count': 322.0, 'mean': 2648.6832298136646, 'std': 2324.205870266538, 'min': 19.0, '25%': 816.75, '50%': 1928.0, '75%': 3924.25, 'max': 14053.0}, 'CHits': {'count': 322.0, 'mean': 717.5714285714286, 'std': 654.4726274762833, 'min': 4.0, '25%': 209.0, '50%': 508.0, '75%': 1059.25, 'max': 4256.0}, 'CHmRun': {'count': 322.0, 'mean': 69.49068322981367, 'std': 86.26606080180498, 'min': 0.0, '25%': 14.0, '50%': 37.5, '75%': 90.0, 'max': 548.0}, 'CRuns': {'count': 322.0, 'mean': 358.7950310559006, 'std': 334.10588576614686, 'min': 1.0, '25%': 100.25, '50%': 247.0, '75%': 526.25, 'max': 2165.0}, 'CRBI': {'count': 322.0, 'mean': 330.11801242236027, 'std': 333.2196169682779, 'min': 0.0, '25%': 88.75, '50%': 220.5, '75%': 426.25, 'max': 1659.0}, 'CWalks': {'count': 322.0, 'mean': 260.2391304347826, 'std': 267.05808454363216, 'min': 0.0, '25%': 67.25, '50%': 170.5, '75%': 339.25, 'max': 1566.0}, 'PutOuts': {'count': 322.0, 'mean': 288.9378881987578, 'std': 280.70461385993525, 'min': 0.0, '25%': 109.25, '50%': 212.0, '75%': 325.0, 'max': 1378.0}, 'Assists': {'count': 322.0, 'mean': 106.91304347826087, 'std': 136.85487646596755, 'min': 0.0, '25%': 7.0, '50%': 39.5, '75%': 166.0, 'max': 492.0}, 'Errors': {'count': 322.0, 'mean': 8.040372670807454, 'std': 6.368359079737258, 'min': 0.0, '25%': 3.0, '50%': 6.0, '75%': 11.0, 'max': 32.0}, 'Salary': {'count': 263.0, 'mean': 535.9258821292775, 'std': 451.11868070253865, 'min': 67.5, '25%': 190.0, '50%': 425.0, '75%': 750.0, 'max': 2460.0}} <dataframe_info> RangeIndex: 322 entries, 0 to 321 Data columns (total 20 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 AtBat 322 non-null int64 1 Hits 322 non-null int64 2 HmRun 322 non-null int64 3 Runs 322 non-null int64 4 RBI 322 non-null int64 5 Walks 322 non-null int64 6 Years 322 non-null int64 7 CAtBat 322 non-null 
int64 8 CHits 322 non-null int64 9 CHmRun 322 non-null int64 10 CRuns 322 non-null int64 11 CRBI 322 non-null int64 12 CWalks 322 non-null int64 13 League 322 non-null object 14 Division 322 non-null object 15 PutOuts 322 non-null int64 16 Assists 322 non-null int64 17 Errors 322 non-null int64 18 Salary 263 non-null float64 19 NewLeague 322 non-null object dtypes: float64(1), int64(16), object(3) memory usage: 50.4+ KB <some_examples> {'AtBat': {'0': 293, '1': 315, '2': 479, '3': 496}, 'Hits': {'0': 66, '1': 81, '2': 130, '3': 141}, 'HmRun': {'0': 1, '1': 7, '2': 18, '3': 20}, 'Runs': {'0': 30, '1': 24, '2': 66, '3': 65}, 'RBI': {'0': 29, '1': 38, '2': 72, '3': 78}, 'Walks': {'0': 14, '1': 39, '2': 76, '3': 37}, 'Years': {'0': 1, '1': 14, '2': 3, '3': 11}, 'CAtBat': {'0': 293, '1': 3449, '2': 1624, '3': 5628}, 'CHits': {'0': 66, '1': 835, '2': 457, '3': 1575}, 'CHmRun': {'0': 1, '1': 69, '2': 63, '3': 225}, 'CRuns': {'0': 30, '1': 321, '2': 224, '3': 828}, 'CRBI': {'0': 29, '1': 414, '2': 266, '3': 838}, 'CWalks': {'0': 14, '1': 375, '2': 263, '3': 354}, 'League': {'0': 'A', '1': 'N', '2': 'A', '3': 'N'}, 'Division': {'0': 'E', '1': 'W', '2': 'W', '3': 'E'}, 'PutOuts': {'0': 446, '1': 632, '2': 880, '3': 200}, 'Assists': {'0': 33, '1': 43, '2': 82, '3': 11}, 'Errors': {'0': 20, '1': 10, '2': 14, '3': 3}, 'Salary': {'0': None, '1': 475.0, '2': 480.0, '3': 500.0}, 'NewLeague': {'0': 'A', '1': 'N', '2': 'A', '3': 'N'}} <end_description>
4,528
0
6,245
4,528
69444261
# dictionary data = { "intents": [ { "tag": "greeting", "patterns": [ "hello", "hey", "how are you?", "hi there", "hi", "whats up", "what's up", "how's it going?", "hows it going?", "how you doing?", "how are you doing?", "how have you been?", "how's your day?", "how was your day going so far?", "how are things?", "how's everything?", "yo", "howdy", "how do you do", ], "responses": [ "howdy partner!", "hello", "hi there", "how are you doing?", "greetings!", "how do you do?", ], }, { "tag": "diagnosis", "patterns": [ "what type of cancer do i have?", "what is my exact diagnosis?", "what is my diagnosis?", "what cancer do i have?", "what is the result of diagnosing?", "what is my prognosis?", "what do you know about my diagnosing?", ], "responses": [ "you are lucky because your tumor is benign.", "your tumor has not yet reached an advanced stage." "we found that your tumor is benign.", ], }, { "tag": "treatment", "patterns": [ "what are my treatment options?", "which treatment do you recommend?", "what's the goal of my treatment?", "will i be cured?", "is there any possibility that i can be cured?", ], "responses": ["Surgery is the usual treatment for most brain tumors."], }, { "tag": "financial", "patterns": ["how much should i pay?", "how much money is this diagnosis?"], "responses": [ "it's totally free.", "i don't know either because the diagnosis is totally free", ], }, { "tag": "desperate", "patterns": [ "am i dying?", "my life is over!", "i don't wanna die!", "i'm so sad!", "i feel so bad!", ], "responses": ["i'm your doctor and i'll do my best to help you!"], }, { "tag": "goodbye", "patterns": [ "bye", "g2g", "see ya", "see you", "adios", "cya", "later", "goodbye", "bye bye", "thank you", ], "responses": [ "it was nice speaking to you", "see you later", "speak soon!", "take care!", "stay safe!", ], }, ] } import json import string import random import nltk import numpy as np from nltk.stem import WordNetLemmatizer import tensorflow as tf from tensorflow.keras import Sequential from tensorflow.keras.layers import Dense, Dropout nltk.download("punkt") nltk.download("wordnet") lemmatizer = WordNetLemmatizer() words = [] classes = [] doc_X = [] doc_Y = [] for intent in data["intents"]: for pattern in intent["patterns"]: tokens = nltk.word_tokenize(pattern) words.extend(tokens) doc_X.append(pattern) doc_Y.append(intent["tag"]) if intent["tag"] not in classes: classes.append(intent["tag"]) words = [ lemmatizer.lemmatize(word.lower()) for word in words if word not in string.punctuation ] words = sorted(set(words)) classes = sorted(set(classes)) print("words:\n") print(words) print("\nclasses:\n") print(classes) print("\ndoc_X:\n") print(doc_X) print("\ndoc_Y:\n") print(doc_Y) training = [] out_empty = [0] * len(classes) for idx, doc in enumerate(doc_X): bow = [] text = (lemmatizer.lemmatize(doc)).lower() for word in words: bow.append(1) if word in text else bow.append(0) output_row = list(out_empty) output_row[classes.index(doc_Y[idx])] = 1 training.append([bow, output_row]) # shuffle the data and convert it to an array random.shuffle(training) training = np.array(training, dtype=object) # split the features and target labels train_X = np.array(list(training[:, 0])) train_y = np.array(list(training[:, 1])) input_shape = (len(train_X[0]),) output_shape = len(train_y[0]) epochs = 1000 # the deep learning model model = Sequential() model.add(Dense(128, input_shape=input_shape, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(64, activation="relu")) model.add(Dropout(0.3)) 
model.add(Dense(output_shape, activation="softmax")) adam = tf.keras.optimizers.Adam(learning_rate=0.01, decay=1e-6) model.compile(loss="categorical_crossentropy", optimizer=adam, metrics=["accuracy"]) print(model.summary()) model.fit(x=train_X, y=train_y, epochs=1000, verbose=1) def clean_text(text): tokens = nltk.word_tokenize(text) tokens = [lemmatizer.lemmatize(word) for word in tokens] return tokens def bag_of_words(text, vocab): tokens = clean_text(text) bow = [0] * len(vocab) for w in tokens: for idx, word in enumerate(vocab): if word == w: bow[idx] = 1 return np.array(bow) def pred_class(text, vocab, labels): bow = bag_of_words(text, vocab) result = model.predict(np.array([bow]))[0] thresh = 0.2 y_pred = [[idx, res] for idx, res in enumerate(result) if res > thresh] y_pred.sort(key=lambda x: x[1], reverse=True) return_list = [] for r in y_pred: return_list.append(labels[r[0]]) return return_list def get_response(intents_list, intents_json): tag = intents_list[0] list_of_intents = intents_json["intents"] for i in list_of_intents: if i["tag"] == tag: result = random.choice(i["responses"]) break return result # running the chatbot while True: message = input("") intents = pred_class(message, words, classes) result = get_response(intents, data) print("Doctor:" + result)
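# Added sketch (not part of the original script): pred_class only keeps tags whose probability exceeds the 0.2 threshold, so an out-of-scope message can return an empty list and get_response would then fail on intents_list[0]. A hedged fallback, assuming the functions and data defined above, could look like this:
while True:
    message = input("")
    intents = pred_class(message, words, classes)
    if not intents:  # nothing scored above the 0.2 threshold
        print("Doctor: sorry, could you rephrase that?")
        continue
    print("Doctor:" + get_response(intents, data))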
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/444/69444261.ipynb
null
null
[{"Id": 69444261, "ScriptId": 18925433, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7737696, "CreationDate": "07/31/2021 05:17:14", "VersionNumber": 1.0, "Title": "chatbot", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 149.0, "LinesInsertedFromPrevious": 149.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
# dictionary data = { "intents": [ { "tag": "greeting", "patterns": [ "hello", "hey", "how are you?", "hi there", "hi", "whats up", "what's up", "how's it going?", "hows it going?", "how you doing?", "how are you doing?", "how have you been?", "how's your day?", "how was your day going so far?", "how are things?", "how's everything?", "yo", "howdy", "how do you do", ], "responses": [ "howdy partner!", "hello", "hi there", "how are you doing?", "greetings!", "how do you do?", ], }, { "tag": "diagnosis", "patterns": [ "what type of cancer do i have?", "what is my exact diagnosis?", "what is my diagnosis?", "what cancer do i have?", "what is the result of diagnosing?", "what is my prognosis?", "what do you know about my diagnosing?", ], "responses": [ "you are lucky because your tumor is benign.", "your tumor has not yet reached an advanced stage." "we found that your tumor is benign.", ], }, { "tag": "treatment", "patterns": [ "what are my treatment options?", "which treatment do you recommend?", "what's the goal of my treatment?", "will i be cured?", "is there any possibility that i can be cured?", ], "responses": ["Surgery is the usual treatment for most brain tumors."], }, { "tag": "financial", "patterns": ["how much should i pay?", "how much money is this diagnosis?"], "responses": [ "it's totally free.", "i don't know either because the diagnosis is totally free", ], }, { "tag": "desperate", "patterns": [ "am i dying?", "my life is over!", "i don't wanna die!", "i'm so sad!", "i feel so bad!", ], "responses": ["i'm your doctor and i'll do my best to help you!"], }, { "tag": "goodbye", "patterns": [ "bye", "g2g", "see ya", "see you", "adios", "cya", "later", "goodbye", "bye bye", "thank you", ], "responses": [ "it was nice speaking to you", "see you later", "speak soon!", "take care!", "stay safe!", ], }, ] } import json import string import random import nltk import numpy as np from nltk.stem import WordNetLemmatizer import tensorflow as tf from tensorflow.keras import Sequential from tensorflow.keras.layers import Dense, Dropout nltk.download("punkt") nltk.download("wordnet") lemmatizer = WordNetLemmatizer() words = [] classes = [] doc_X = [] doc_Y = [] for intent in data["intents"]: for pattern in intent["patterns"]: tokens = nltk.word_tokenize(pattern) words.extend(tokens) doc_X.append(pattern) doc_Y.append(intent["tag"]) if intent["tag"] not in classes: classes.append(intent["tag"]) words = [ lemmatizer.lemmatize(word.lower()) for word in words if word not in string.punctuation ] words = sorted(set(words)) classes = sorted(set(classes)) print("words:\n") print(words) print("\nclasses:\n") print(classes) print("\ndoc_X:\n") print(doc_X) print("\ndoc_Y:\n") print(doc_Y) training = [] out_empty = [0] * len(classes) for idx, doc in enumerate(doc_X): bow = [] text = (lemmatizer.lemmatize(doc)).lower() for word in words: bow.append(1) if word in text else bow.append(0) output_row = list(out_empty) output_row[classes.index(doc_Y[idx])] = 1 training.append([bow, output_row]) # shuffle the data and convert it to an array random.shuffle(training) training = np.array(training, dtype=object) # split the features and target labels train_X = np.array(list(training[:, 0])) train_y = np.array(list(training[:, 1])) input_shape = (len(train_X[0]),) output_shape = len(train_y[0]) epochs = 1000 # the deep learning model model = Sequential() model.add(Dense(128, input_shape=input_shape, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(64, activation="relu")) model.add(Dropout(0.3)) 
model.add(Dense(output_shape, activation="softmax")) adam = tf.keras.optimizers.Adam(learning_rate=0.01, decay=1e-6) model.compile(loss="categorical_crossentropy", optimizer=adam, metrics=["accuracy"]) print(model.summary()) model.fit(x=train_X, y=train_y, epochs=1000, verbose=1) def clean_text(text): tokens = nltk.word_tokenize(text) tokens = [lemmatizer.lemmatize(word) for word in tokens] return tokens def bag_of_words(text, vocab): tokens = clean_text(text) bow = [0] * len(vocab) for w in tokens: for idx, word in enumerate(vocab): if word == w: bow[idx] = 1 return np.array(bow) def pred_class(text, vocab, labels): bow = bag_of_words(text, vocab) result = model.predict(np.array([bow]))[0] thresh = 0.2 y_pred = [[idx, res] for idx, res in enumerate(result) if res > thresh] y_pred.sort(key=lambda x: x[1], reverse=True) return_list = [] for r in y_pred: return_list.append(labels[r[0]]) return return_list def get_response(intents_list, intents_json): tag = intents_list[0] list_of_intents = intents_json["intents"] for i in list_of_intents: if i["tag"] == tag: result = random.choice(i["responses"]) break return result # running the chatbot while True: message = input("") intents = pred_class(message, words, classes) result = get_response(intents, data) print("Doctor:" + result)
false
0
1,709
0
1,709
1,709
69444358
<jupyter_start><jupyter_text>Amazon ML Challenge 2021 HackerEarth Kaggle dataset identifier: amazon-ml-challenge-2021-hackerearth <jupyter_script>import pandas as pd import spacy import string import spacy.cli nlp = spacy.load("en_core_web_lg") import numpy as np import re import nltk from nltk.corpus import stopwords sw_nltk = stopwords.words("english") data = pd.read_csv( "../input/amazon-ml-challenge-2021-hackerearth/train.csv", escapechar="\\", quoting=3, ) data = data.head(1000000) data.dropna( how="all", subset=["TITLE", "DESCRIPTION", "BULLET_POINTS", "BRAND"], inplace=True ) data.dropna(how="all", subset=["TITLE", "DESCRIPTION", "BULLET_POINTS"], inplace=True) data["BRAND"].replace(np.nan, "unbranded", inplace=True) data.replace(np.nan, "", inplace=True) def remove_html_tags(text): """Remove html tags from a string""" clean = re.compile("<.*?>") return re.sub(clean, "", text) def cleaner(text): if text == "": return "" text = remove_html_tags(text) text = text.lower() text = text.translate(str.maketrans("", "", string.punctuation)) words = [word for word in text.split(" ") if word not in sw_nltk] return " ".join(words) data["TITLE"] = data["TITLE"].apply(lambda x: cleaner(x)) data["DESCRIPTION"] = data["DESCRIPTION"].apply(lambda x: cleaner(x)) data["BULLET_POINTS"] = data["BULLET_POINTS"].apply(lambda x: cleaner(x)) data["BRAND"] = data["BRAND"].apply(lambda x: cleaner(x)) data.to_pickle("./partiallyCleanedData_K.pkl") data = pd.read_pickle("./partiallyCleanedData_K.pkl") data.head() import yake keyword_extractor = yake.KeywordExtractor(top=10, n=2) def extract_keywords(text): keywords = keyword_extractor.extract_keywords(text) return " ".join([kw[0] for kw in keywords]) for index, row in data.iterrows(): if data.at[index, "BULLET_POINTS"] != "": continue if data.at[index, "BULLET_POINTS"] == "" and data.at[index, "DESCRIPTION"] != "": data.at[index, "BULLET_POINTS"] = extract_keywords( data.at[index, "DESCRIPTION"] ) elif ( data.at[index, "BULLET_POINTS"] == "" and data.at[index, "DESCRIPTION"] == "" and data.at[index, "TITLE"] != "" ): data.at[index, "BULLET_POINTS"] = extract_keywords(data.at[index, "TITLE"]) data.head() finalDF = data[data.columns[2:]] finalDF.to_pickle("./finalData_K.pkl") finalDF.to_csv("./finalData_K.csv")
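# Added sketch (not part of the original notebook): the iterrows loop above fills empty BULLET_POINTS from DESCRIPTION first and falls back to TITLE. The same fill order can be expressed with boolean masks, avoiding the per-row Python loop (YAKE itself still runs once per selected row). Assumes the `data` frame and extract_keywords helper defined above.
empty_bp = data["BULLET_POINTS"] == ""
from_desc = empty_bp & (data["DESCRIPTION"] != "")
from_title = empty_bp & (data["DESCRIPTION"] == "") & (data["TITLE"] != "")

data.loc[from_desc, "BULLET_POINTS"] = data.loc[from_desc, "DESCRIPTION"].apply(extract_keywords)
data.loc[from_title, "BULLET_POINTS"] = data.loc[from_title, "TITLE"].apply(extract_keywords)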
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/444/69444358.ipynb
amazon-ml-challenge-2021-hackerearth
nakuuu
[{"Id": 69444358, "ScriptId": 18960099, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7907453, "CreationDate": "07/31/2021 05:19:13", "VersionNumber": 3.0, "Title": "Data cleaning", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 75.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 73.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
[{"Id": 92620604, "KernelVersionId": 69444358, "SourceDatasetVersionId": 2477615}]
[{"Id": 2477615, "DatasetId": 1499446, "DatasourceVersionId": 2520137, "CreatorUserId": 7333054, "LicenseName": "Unknown", "CreationDate": "07/29/2021 18:42:11", "VersionNumber": 1.0, "Title": "Amazon ML Challenge 2021 HackerEarth", "Slug": "amazon-ml-challenge-2021-hackerearth", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1499446, "CreatorUserId": 7333054, "OwnerUserId": 7333054.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2477615.0, "CurrentDatasourceVersionId": 2520137.0, "ForumId": 1519176, "Type": 2, "CreationDate": "07/29/2021 18:42:11", "LastActivityDate": "07/29/2021", "TotalViews": 10376, "TotalDownloads": 292, "TotalVotes": 31, "TotalKernels": 5}]
[{"Id": 7333054, "UserName": "nakuuu", "DisplayName": "Nakuuu", "RegisterDate": "05/03/2021", "PerformanceTier": 0}]
import pandas as pd import spacy import string import spacy.cli nlp = spacy.load("en_core_web_lg") import numpy as np import re import nltk from nltk.corpus import stopwords sw_nltk = stopwords.words("english") data = pd.read_csv( "../input/amazon-ml-challenge-2021-hackerearth/train.csv", escapechar="\\", quoting=3, ) data = data.head(1000000) data.dropna( how="all", subset=["TITLE", "DESCRIPTION", "BULLET_POINTS", "BRAND"], inplace=True ) data.dropna(how="all", subset=["TITLE", "DESCRIPTION", "BULLET_POINTS"], inplace=True) data["BRAND"].replace(np.nan, "unbranded", inplace=True) data.replace(np.nan, "", inplace=True) def remove_html_tags(text): """Remove html tags from a string""" clean = re.compile("<.*?>") return re.sub(clean, "", text) def cleaner(text): if text == "": return "" text = remove_html_tags(text) text = text.lower() text = text.translate(str.maketrans("", "", string.punctuation)) words = [word for word in text.split(" ") if word not in sw_nltk] return " ".join(words) data["TITLE"] = data["TITLE"].apply(lambda x: cleaner(x)) data["DESCRIPTION"] = data["DESCRIPTION"].apply(lambda x: cleaner(x)) data["BULLET_POINTS"] = data["BULLET_POINTS"].apply(lambda x: cleaner(x)) data["BRAND"] = data["BRAND"].apply(lambda x: cleaner(x)) data.to_pickle("./partiallyCleanedData_K.pkl") data = pd.read_pickle("./partiallyCleanedData_K.pkl") data.head() import yake keyword_extractor = yake.KeywordExtractor(top=10, n=2) def extract_keywords(text): keywords = keyword_extractor.extract_keywords(text) return " ".join([kw[0] for kw in keywords]) for index, row in data.iterrows(): if data.at[index, "BULLET_POINTS"] != "": continue if data.at[index, "BULLET_POINTS"] == "" and data.at[index, "DESCRIPTION"] != "": data.at[index, "BULLET_POINTS"] = extract_keywords( data.at[index, "DESCRIPTION"] ) elif ( data.at[index, "BULLET_POINTS"] == "" and data.at[index, "DESCRIPTION"] == "" and data.at[index, "TITLE"] != "" ): data.at[index, "BULLET_POINTS"] = extract_keywords(data.at[index, "TITLE"]) data.head() finalDF = data[data.columns[2:]] finalDF.to_pickle("./finalData_K.pkl") finalDF.to_csv("./finalData_K.csv")
false
1
749
5
788
749
69444475
<jupyter_start><jupyter_text>Covid19 Osaka date examined examined positives accumulated positives current positives left hospital accumulated left hospital left hospital found accumulated left hospital found deaths link unknown covid19 open data in osaka in japan [https://covid19-osaka.info/](https://covid19-osaka.info/) Kaggle dataset identifier: covid19-osaka <jupyter_script>import os import numpy as np import pandas as pd import random import seaborn as sns import datetime as datetime import matplotlib.dates as dates import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots from contextlib import contextmanager from time import time from tqdm import tqdm import lightgbm as lgbm from sklearn.metrics import classification_report, log_loss, accuracy_score from sklearn.metrics import mean_squared_error from sklearn.model_selection import KFold # # data : osaka_summary.csv data00 = pd.read_csv("../input/covid19-osaka/osaka_summary - 0731.csv") data00[-5:] data0 = data00[["date", "examined positives"]] data0["ratio to 7 days before"] = 1 n = len(data0) for i in range(n): cpi = data0["examined positives"][i] data0.loc[i + 7, "positives 7 days before"] = cpi data0[100:114] data0["ratio to 7 days before"] = ( data0["examined positives"] * 100 / data0["positives 7 days before"] ) data0["mean ratio to 7 days before"] = ( data0["ratio to 7 days before"].rolling(window=7).mean() ) data1 = data0[400:-7] data1 fig = make_subplots(specs=[[{"secondary_y": False}]]) fig.add_trace( go.Scatter( x=data1["date"], y=data1["mean ratio to 7 days before"], name="mean ratio weekly", ), secondary_y=False, ) fig.update_layout( autosize=False, width=700, height=500, title_text="Mean ratio of increase weekly in Osaka", ) fig.update_xaxes(title_text="Date") fig.update_yaxes(title_text="Ratio", secondary_y=False) fig.show() data2 = data1[-7:] data2 factor = round(data2.loc[data2.index.tolist()[-1], "mean ratio to 7 days before"]) / 100 print(factor) N = [] for i in range(40): N += [i] NUMB = pd.DataFrame(N) data2["date2"] = pd.to_datetime(data2["date"]) data2 = pd.concat([data2, NUMB], axis=0) data2 = data2.drop([0, "date"], axis=1).reset_index(drop=True) data2[0:10] data2["pcr_positives_ft"] = data2["examined positives"] data2["pcr_positives160"] = data2["examined positives"] data2["pcr_positives140"] = data2["examined positives"] data2["pcr_positives110"] = data2["examined positives"] for i in range(7, 47): data2.loc[i, "date2"] = data2.loc[i - 1, "date2"] + datetime.timedelta(days=1) data2.loc[i, "pcr_positives_ft"] = data2.loc[i - 7, "pcr_positives_ft"] * factor data2.loc[i, "pcr_positives160"] = data2.loc[i - 7, "pcr_positives160"] * 1.6 data2.loc[i, "pcr_positives140"] = data2.loc[i - 7, "pcr_positives140"] * 1.4 data2.loc[i, "pcr_positives110"] = data2.loc[i - 7, "pcr_positives110"] * 1.1 data3 = data2[ [ "date2", "examined positives", "pcr_positives_ft", "pcr_positives160", "pcr_positives140", "pcr_positives110", ] ] data3 fig = make_subplots(specs=[[{"secondary_y": False}]]) fig.add_trace( go.Scatter( x=data3["date2"], y=data3["pcr_positives_ft"], name="the latest ratio of incresase weekly", ), secondary_y=False, ) fig.add_trace( go.Scatter( x=data3["date2"], y=data3["pcr_positives160"], name="160% incresase weekly" ), secondary_y=False, ) fig.add_trace( go.Scatter( x=data3["date2"], y=data3["pcr_positives140"], name="140% incresase weekly" ), secondary_y=False, ) fig.add_trace( go.Scatter( x=data3["date2"], 
y=data3["pcr_positives110"], name="110% incresase weekly" ), secondary_y=False, ) fig.add_trace( go.Scatter( x=data3["date2"], y=data3["examined positives"], name="actual number positives" ), secondary_y=False, ) fig.update_layout( autosize=False, width=700, height=500, title_text="Prediction of number positives in Osaka", ) fig.update_xaxes(title_text="Date") fig.update_yaxes(title_text="Number", secondary_y=False) fig.show()
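# Added sketch (not part of the original notebook): because the projection multiplies the count from 7 days earlier by a weekly ratio, that same ratio implies a doubling time. Uses the `factor` computed above (assumed > 1).
import numpy as np

def doubling_time_days(weekly_ratio):
    # cases grow by `weekly_ratio` every 7 days, so they double after 7 * ln(2) / ln(ratio) days
    return 7 * np.log(2) / np.log(weekly_ratio)

print(doubling_time_days(1.4))  # ~14.4 days at 140% weekly growth
print(doubling_time_days(1.1))  # ~50.9 days at 110% weekly growth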
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/444/69444475.ipynb
covid19-osaka
stpeteishii
[{"Id": 69444475, "ScriptId": 18682111, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2648923, "CreationDate": "07/31/2021 05:21:19", "VersionNumber": 14.0, "Title": "Covid19 Osaka Future Prediction", "EvaluationDate": "07/31/2021", "IsChange": false, "TotalLines": 91.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 91.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 92620834, "KernelVersionId": 69444475, "SourceDatasetVersionId": 2481115}]
[{"Id": 2481115, "DatasetId": 1399848, "DatasourceVersionId": 2523645, "CreatorUserId": 2648923, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "07/30/2021 16:54:17", "VersionNumber": 18.0, "Title": "Covid19 Osaka", "Slug": "covid19-osaka", "Subtitle": "Covid19 daily infection situation in Osaka Japan", "Description": "date\nexamined\nexamined positives\naccumulated positives\ncurrent positives\nleft hospital\naccumulated left hospital\nleft hospital found\naccumulated left hospital found\ndeaths\nlink unknown\n\ncovid19 open data in osaka in japan\n[https://covid19-osaka.info/](https://covid19-osaka.info/)", "VersionNotes": "Data Update 2021/07/30", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1399848, "CreatorUserId": 2648923, "OwnerUserId": 2648923.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2621366.0, "CurrentDatasourceVersionId": 2665143.0, "ForumId": 1419133, "Type": 2, "CreationDate": "06/10/2021 02:52:04", "LastActivityDate": "06/10/2021", "TotalViews": 3932, "TotalDownloads": 39, "TotalVotes": 9, "TotalKernels": 6}]
[{"Id": 2648923, "UserName": "stpeteishii", "DisplayName": "stpete_ishii", "RegisterDate": "12/26/2018", "PerformanceTier": 2}]
import os
import numpy as np
import pandas as pd
import random
import seaborn as sns
import datetime as datetime
import matplotlib.dates as dates
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from contextlib import contextmanager
from time import time
from tqdm import tqdm
import lightgbm as lgbm
from sklearn.metrics import classification_report, log_loss, accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold

# # data : osaka_summary.csv
data00 = pd.read_csv("../input/covid19-osaka/osaka_summary - 0731.csv")
data00[-5:]
data0 = data00[["date", "examined positives"]]
data0["ratio to 7 days before"] = 1
n = len(data0)
for i in range(n):
    cpi = data0["examined positives"][i]
    data0.loc[i + 7, "positives 7 days before"] = cpi
data0[100:114]
data0["ratio to 7 days before"] = (
    data0["examined positives"] * 100 / data0["positives 7 days before"]
)
data0["mean ratio to 7 days before"] = (
    data0["ratio to 7 days before"].rolling(window=7).mean()
)
data1 = data0[400:-7]
data1
fig = make_subplots(specs=[[{"secondary_y": False}]])
fig.add_trace(
    go.Scatter(
        x=data1["date"],
        y=data1["mean ratio to 7 days before"],
        name="mean ratio weekly",
    ),
    secondary_y=False,
)
fig.update_layout(
    autosize=False,
    width=700,
    height=500,
    title_text="Mean ratio of increase weekly in Osaka",
)
fig.update_xaxes(title_text="Date")
fig.update_yaxes(title_text="Ratio", secondary_y=False)
fig.show()
data2 = data1[-7:]
data2
factor = round(data2.loc[data2.index.tolist()[-1], "mean ratio to 7 days before"]) / 100
print(factor)
N = []
for i in range(40):
    N += [i]
NUMB = pd.DataFrame(N)
data2["date2"] = pd.to_datetime(data2["date"])
data2 = pd.concat([data2, NUMB], axis=0)
data2 = data2.drop([0, "date"], axis=1).reset_index(drop=True)
data2[0:10]
data2["pcr_positives_ft"] = data2["examined positives"]
data2["pcr_positives160"] = data2["examined positives"]
data2["pcr_positives140"] = data2["examined positives"]
data2["pcr_positives110"] = data2["examined positives"]
for i in range(7, 47):
    data2.loc[i, "date2"] = data2.loc[i - 1, "date2"] + datetime.timedelta(days=1)
    data2.loc[i, "pcr_positives_ft"] = data2.loc[i - 7, "pcr_positives_ft"] * factor
    data2.loc[i, "pcr_positives160"] = data2.loc[i - 7, "pcr_positives160"] * 1.6
    data2.loc[i, "pcr_positives140"] = data2.loc[i - 7, "pcr_positives140"] * 1.4
    data2.loc[i, "pcr_positives110"] = data2.loc[i - 7, "pcr_positives110"] * 1.1
data3 = data2[
    [
        "date2",
        "examined positives",
        "pcr_positives_ft",
        "pcr_positives160",
        "pcr_positives140",
        "pcr_positives110",
    ]
]
data3
fig = make_subplots(specs=[[{"secondary_y": False}]])
fig.add_trace(
    go.Scatter(
        x=data3["date2"],
        y=data3["pcr_positives_ft"],
        name="the latest ratio of increase weekly",
    ),
    secondary_y=False,
)
fig.add_trace(
    go.Scatter(
        x=data3["date2"], y=data3["pcr_positives160"], name="160% increase weekly"
    ),
    secondary_y=False,
)
fig.add_trace(
    go.Scatter(
        x=data3["date2"], y=data3["pcr_positives140"], name="140% increase weekly"
    ),
    secondary_y=False,
)
fig.add_trace(
    go.Scatter(
        x=data3["date2"], y=data3["pcr_positives110"], name="110% increase weekly"
    ),
    secondary_y=False,
)
fig.add_trace(
    go.Scatter(
        x=data3["date2"], y=data3["examined positives"], name="actual number positives"
    ),
    secondary_y=False,
)
fig.update_layout(
    autosize=False,
    width=700,
    height=500,
    title_text="Prediction of number positives in Osaka",
)
fig.update_xaxes(title_text="Date")
fig.update_yaxes(title_text="Number", secondary_y=False)
fig.show()
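# A minimal sketch (not the notebook's code) of the forecasting rule used above:
# each projected day is the count from seven days earlier times a weekly ratio
# (the latest observed `factor`, or the fixed 1.6 / 1.4 / 1.1 scenarios).
# The function name and the sample numbers below are illustrative only.
def extrapolate_weekly(last_week, factor, horizon=40):
    """Project `horizon` future values from the last 7 observed ones,
    assuming each new day is `factor` times the same weekday one week earlier."""
    series = list(last_week)  # needs at least 7 seed values
    for _ in range(horizon):
        series.append(series[-7] * factor)  # y[t] = factor * y[t - 7]
    return series[len(last_week):]  # return only the projected days

# Example: seven recent daily counts under a 1.4x-per-week growth scenario.
print(extrapolate_weekly([100, 120, 90, 110, 130, 125, 140], 1.4, horizon=14))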
false
1
1,427
2
1,533
1,427
69444713
<jupyter_start><jupyter_text>Satellite Images of Hurricane Damage ### Overview The data are satellite images from Texas after Hurricane Harvey divided into two groups (damage and no_damage). The goal is to make a model which can automatically identify if a given region is likely to contain flooding damage. ### Source Data originally taken from: https://ieee-dataport.org/open-access/detecting-damaged-buildings-post-hurricane-satellite-imagery-based-customized and can be cited with http://dx.doi.org/10.21227/sdad-1e56 and the original paper is here: https://arxiv.org/abs/1807.01688 Kaggle dataset identifier: satellite-images-of-hurricane-damage <jupyter_script>import pandas as pd import numpy as np import os from glob import glob import random import matplotlib.pylab as plt import seaborn as sns pd.set_option("display.max_colwidth", None) img_paths = [] img_labels = [] for i in glob("../input/satellite-images-of-hurricane-damage/**", recursive=True): if i.endswith(".jpeg"): a = i.split("/") img_paths.append(i) img_labels.append(a[-2]) len(img_paths), len(img_labels) img_path = pd.Series(img_paths).astype(str) labels = pd.Series(img_labels) data = pd.concat([img_path, labels], axis=1) data.sample(5) plt.figure(figsize=(15, 5)) sns.countplot(x=data[1]) from sklearn.model_selection import train_test_split train_set, test_set = train_test_split(data, test_size=0.25, random_state=0) train_set.shape, test_set.shape import tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow import keras train_gen = ImageDataGenerator(validation_split=0.1) test_gen = ImageDataGenerator() train_data = train_gen.flow_from_dataframe( dataframe=train_set, x_col=0, y_col=1, target_size=(227, 227), color_mode="rgb", class_mode="categorical", shuffle=True, subset="training", batch_size=100, ) val_data = train_gen.flow_from_dataframe( dataframe=train_set, x_col=0, y_col=1, target_size=(227, 227), color_mode="rgb", class_mode="categorical", shuffle=False, subset="validation", batch_size=100, ) test_data = test_gen.flow_from_dataframe( dataframe=test_set, x_col=0, y_col=1, target_size=(227, 227), color_mode="rgb", class_mode="categorical", shuffle=False, batch_size=100, ) model = keras.models.Sequential( [ keras.layers.Conv2D( filters=96, kernel_size=(11, 11), strides=(4, 4), activation="relu", input_shape=(227, 227, 3), ), keras.layers.BatchNormalization(), keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2)), keras.layers.Conv2D( filters=256, kernel_size=(5, 5), strides=(1, 1), activation="relu", padding="same", ), keras.layers.BatchNormalization(), keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2)), keras.layers.Conv2D( filters=384, kernel_size=(3, 3), strides=(1, 1), activation="relu", padding="same", ), keras.layers.BatchNormalization(), keras.layers.Conv2D( filters=384, kernel_size=(3, 3), strides=(1, 1), activation="relu", padding="same", ), keras.layers.BatchNormalization(), keras.layers.Conv2D( filters=256, kernel_size=(3, 3), strides=(1, 1), activation="relu", padding="same", ), keras.layers.BatchNormalization(), keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2)), keras.layers.Flatten(), keras.layers.Dense(4096, activation="relu"), keras.layers.Dropout(0.5), keras.layers.Dense(4096, activation="relu"), keras.layers.Dropout(0.5), keras.layers.Dense(2, activation="sigmoid"), ] ) model.compile( optimizer=tf.optimizers.Adam(lr=0.000001), loss="binary_crossentropy", metrics=["accuracy", "Recall"], ) model.summary() history = model.fit(train_data, epochs=15, 
validation_data=val_data) import matplotlib.pyplot as plt # plotting the Accuracy of test and training sets plt.plot(history.history["accuracy"]) plt.plot(history.history["val_accuracy"]) plt.title("Model Accuracy") plt.ylabel("accuracy") plt.xlabel("epoch") plt.legend(["train", "test"], loc="upper left") plt.show() # plotting the loss of test and training sets plt.plot(history.history["loss"]) plt.plot(history.history["val_loss"]) plt.title("Model Loss") plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["train", "test"], loc="upper left") plt.show() y_pred = model.predict(test_data) y_pred = np.argmax(y_pred, axis=1) from sklearn.metrics import classification_report, confusion_matrix print(classification_report(test_data.labels, y_pred)) classes = ["Damage", "No Damage"] con_mat_df = pd.DataFrame( confusion_matrix(test_data.labels, y_pred), index=classes, columns=classes ) import seaborn as sns figure = plt.figure(figsize=(12, 6)) sns.heatmap(con_mat_df, annot=True, cmap=plt.cm.cool, fmt="d") plt.tight_layout() plt.ylabel("Actual") plt.xlabel("Predicted") plt.show()
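# The network above ends in a two-unit sigmoid layer trained with binary_crossentropy
# on one-hot labels (class_mode="categorical"). That combination does train, but the
# more conventional pairing for one-hot two-class labels is a softmax output with
# categorical_crossentropy (or a single sigmoid unit with class_mode="binary").
# Minimal sketch of that head only -- the tiny convolutional body, layer sizes, and
# learning rate are placeholders, not the notebook's AlexNet-style architecture.
from tensorflow import keras

sketch = keras.Sequential(
    [
        keras.layers.Input(shape=(227, 227, 3)),  # same input size as the generators above
        keras.layers.Conv2D(16, 3, activation="relu"),
        keras.layers.GlobalAveragePooling2D(),
        keras.layers.Dense(2, activation="softmax"),  # probabilities over {damage, no_damage}
    ]
)
sketch.compile(
    optimizer=keras.optimizers.Adam(learning_rate=1e-4),  # `learning_rate` is the current name; `lr` is a deprecated alias
    loss="categorical_crossentropy",  # matches one-hot labels from class_mode="categorical"
    metrics=["accuracy"],
)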
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/444/69444713.ipynb
satellite-images-of-hurricane-damage
kmader
[{"Id": 69444713, "ScriptId": 18961105, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5422947, "CreationDate": "07/31/2021 05:25:24", "VersionNumber": 1.0, "Title": "AlexNet-Hurricane-Damage-Detection", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 144.0, "LinesInsertedFromPrevious": 144.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92621337, "KernelVersionId": 69444713, "SourceDatasetVersionId": 742001}]
[{"Id": 742001, "DatasetId": 383256, "DatasourceVersionId": 762631, "CreatorUserId": 67483, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "10/15/2019 20:42:56", "VersionNumber": 1.0, "Title": "Satellite Images of Hurricane Damage", "Slug": "satellite-images-of-hurricane-damage", "Subtitle": "Detecting Damaged Buildings", "Description": "### Overview\nThe data are satellite images from Texas after Hurricane Harvey divided into two groups (damage and no_damage). The goal is to make a model which can automatically identify if a given region is likely to contain flooding damage.\n\n### Source\nData originally taken from: https://ieee-dataport.org/open-access/detecting-damaged-buildings-post-hurricane-satellite-imagery-based-customized and can be cited with http://dx.doi.org/10.21227/sdad-1e56 and the original paper is here: https://arxiv.org/abs/1807.01688", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 383256, "CreatorUserId": 67483, "OwnerUserId": 67483.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 742001.0, "CurrentDatasourceVersionId": 762631.0, "ForumId": 395209, "Type": 2, "CreationDate": "10/15/2019 20:42:56", "LastActivityDate": "10/15/2019", "TotalViews": 35863, "TotalDownloads": 2399, "TotalVotes": 48, "TotalKernels": 36}]
[{"Id": 67483, "UserName": "kmader", "DisplayName": "K Scott Mader", "RegisterDate": "11/04/2012", "PerformanceTier": 4}]
false
0
1,439
0
1,632
1,439
69444421
<jupyter_start><jupyter_text>new_model Kaggle dataset identifier: new-model <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("../input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import mlb import json import gc # load model import joblib model_path = "../input/new-model/" model1 = joblib.load(model_path + "new_model1.pkl") model2 = joblib.load(model_path + "new_model2.pkl") model3 = joblib.load(model_path + "new_model3.pkl") model4 = joblib.load(model_path + "new_model4.pkl") def merge_nested_form(form, column): result = pd.DataFrame() for index, value in form[column].items(): value = str(value) if (value != "nan") and (value is not np.nan): unpack = pd.read_json(value) result = pd.concat([result, unpack], axis=0) index = pd.Series(list(range(0, result.shape[0]))) # 给整张表重置一下index result = result.set_index(index) return result # def merge_nested_form(form, name): # l = [] # rows = len(form[name]) # print(name) # print('rows:',rows) # for i in range(rows): # # for now if nan, we skip, NEED TO CHANGE later # print('type:',type(form[name].iloc[i])) # # print('ele:',form[name].iloc[i]) # json_str = str(form[name].iloc[i]) # print(json_str == 'nan') # # print('str:',json_str) # if(json_str=='nan' or pd.isna(json_str)): # continue # l.append(pd.read_json(json_str)) # mergedDF = pd.concat(l, ignore_index = True) # return mergedDF def unpack_certain_column_and_concat_date(form, column): form = form.copy() result = pd.DataFrame() # 先创建一个空白表格 for index, value in form[column].iteritems(): # 把这一列逐行遍历 if pd.isnull(index): # 注意判断方法 continue else: unpack = pd.read_json(value) # 把有内容的单元解压缩成df date = form["date"].loc[index] # 找到 unpack 出的这个表格对应的日期是多少 rows_amount = unpack.shape[0] # 看一下这个 unpack 的表格一共有多少行 unpack["date"] = pd.Series(data=[int(date)] * rows_amount) # 因为整张 unpack 表都对应一个日期,所以用乘法的方法,把这个日期广播到 date 那一列的每一行 result = pd.concat([result, unpack], axis=0) # 把解压缩出来的内容和每个拼在一起 # 刚才为了实现广播这个操作,date的格式是int # 现在需要把 date 那一列的格式改成 str,因为其他表 date 那一列都是 str,不改的话后面 merge 很麻烦 # 但是需要先转成 datetime 格式,因为年份月份和日期之间需要加上分界线 result["date"] = pd.to_datetime(result["date"], format="%Y%m%d") result["date"] = result["date"].dt.date result["date"] = result["date"].astype(str) index = pd.Series(list(range(0, result.shape[0]))) # 给整张表重置一下index result = result.set_index(index) return result def unique_record(data, column1, column2, new_column): data = data.copy() data[column1] = data[column1].astype(str) data[column2] = data[column2].astype(str) data[new_column] = data[column1] + data[column2] # 到此为止,给原表重新创造了一列,后面要以这一列为基准 temp = data.groupby( new_column ).max() # 这里应该是 groupby().max(),这个函数好像是没保存,跑一下如果报错了就是这个有问题 # 把一天之内参加了多场比赛的球队/队员的信息保留最大值 # 之所以保留最大值,是因为这样也能保留下字符串的列 # 但是这一步会导致 index 的错乱,同时 string 的列会被自动删除 temp[new_column] = temp.index reset_index = pd.Series(list(range(0, temp.shape[0]))) # 给整张表重置一下index temp = temp.set_index(reset_index) # 因为刚才的 groupby 计算搞乱了 index,需要重置一下 """ categorical_columns = data[data.columns[data.dtypes == object]] # 
这一步就很牛逼了:把数据格式为 object 的列拎出来 # 注意,被拎出来的列中一定包括 columns1,column2,和 new_column result = pd.merge(temp,categorical_columns,on=new_column,how='left') """ # 因为刚才的 groupby 函数使用的是 max,不会对字符串的列产生影响,否则还要想办法处理这些列 # 不过写在这里的处理方法也不是完美的,会导致一部分的数据被迫丢失 result = temp return result def merge_two_nested_form(primary_form, secondary_form, key): primary_form = primary_form.copy() secondary_form = secondary_form.copy() result = pd.DataFrame() merge_area = round(primary_form.shape[0] / 100) # 每次 merge 1% start = 0 for n in range(0, 100): start = n * merge_area end = min(start + merge_area, primary_form.shape[0]) temp = pd.merge( primary_form.iloc[start:end], secondary_form, on=key, # 加这个参数很重要,不然的话电脑需要一个一个去匹配,内存会爆 how="left", ) result = result.append(temp) gc.collect() return result def target_encode(data, y, a=0, p=0): # 输入的 data 格式全部都是 category value data = data.copy() columns = data.columns.drop(y) for n in columns: temp = data.dropna(subset=[n]) # 必须先复制一个表(没用copy是因为dropna默认inpalce为否),然后把groupby函数括号中的那一列(也就是n那一列)中的nan全都给删掉,然后再按照n那一列groupby # 不然会报错,因为groupby的时候是自动dropna的,transform 返回结果的行数就少了,和原来的列表行数不匹配 # 因为pandas要求transform返回的series行数必须和原表的行数相同,即使不把transform的结果插入原表,系统也这么要求自己 # 注意,是 transform 结果的长度不匹配,不是插入新列的时候长度不匹配。所以我们先得到transform的结果,在按照index插入原表就不存在行数不匹配的问题 data[f"{n}(target)"] = temp.groupby(n)[y].transform( lambda x: (np.sum(x) + a * p) / (x.count() + a) ) # 按照每一列 groupby,然后把每一种值对应的y给整理出来,求和 和 计数 分别作为分子分母 # a 和 p 是两个超参数,用于避免 overfitting,默认为0 data.drop(columns=list(columns), inplace=True) return data # dict_path = '/kaggle/input/encoded-dict/' # encode_dict = json.load(open(dict_path+"dict.txt")) # tt_dict_path = '/kaggle/input/twitter-encoded-dict/' # ptt_dict = json.load(open(tt_dict_path+"ptt_dict.txt")) # ttt_dict = json.load(open(tt_dict_path+"ttt_dict.txt")) # twitter_info = pd.read_csv("../input/twitter-info-csv/twitter_info.csv") # ptt_dict = pd.Series(twitter_info['playerId(target)'].values,index=twitter_info.player_twitter).to_dict() # ttt_dict = pd.Series(twitter_info['playerId(target)'].values,index=twitter_info.team_twitter).to_dict() def twitter_fillna(data, player_twitter, team_twitter, playerId, teamId): data = data.copy() # 先把所有的 player_twitter 项给 fillna 一下 # 对于那些有记录的 palyer 来说,由于记录发生在每月的1号,所以这一个月的 player_twitter 全部按照1号的填充 # 可能有些人有的月份没有 player_twitter 记录,那么就按上一个月的记录延续,如果是开头几个月没有,那么就按最早的记录填充 # 所以先 forward fill,在 back fill data[player_twitter] = data.groupby(playerId)[player_twitter].fillna(method="ffill") data[player_twitter] = data.groupby(playerId)[player_twitter].fillna(method="bfill") # 但是这么操作一圈以后,有些人他们一条 player_twitter 记录都没有,只能全部-1了 data[player_twitter] = data[player_twitter].fillna(-1) # 接下来处理 team_twitter # 对于那些有 teamId 的行,team_twitter 的缺失主要是由于 team_twitter 的记录都在每个月的1号 data[team_twitter] = data.groupby( [teamId], )[ team_twitter ].fillna(method="ffill") # 然后把没有 teamId 记录的 player_date,全部 team_twitter =-1 data[team_twitter] = data[team_twitter].loc[data[teamId].isnull() == True] = -1 return data def var_type_judge(data): data = data.copy() result = pd.DataFrame(index=data.columns, columns=["variable type"]) for c, n in data.iteritems(): if data[c].unique().shape[0] == data[c].shape[0]: result.loc[c] = "ID" elif data[c].dtype == "int64" or data[c].dtype == "float64": result.loc[c] = "numerical" elif data[c].dtype == "O": result.loc[c] = "catagorical" else: result.loc[c] = "?????????" 
return result def trim_fraction(text): text = str(text) if ".0" in text: return text[: text.rfind(".0")] return text X_columns = [ "playerId", "teamId", "player_twitter", "team_twitter", "status", "awardId", "seriesDescription", "Current_winning_percentage", "Score", "home", "jerseyNum", "positionCode", "battingOrder", "gamesPlayedBatting", "flyOuts", "groundOuts", "runsScored", "doubles", "triples", "homeRuns", "strikeOuts", "baseOnBalls", "intentionalWalks", "hits", "hitByPitch", "atBats", "caughtStealing", "stolenBases", "groundIntoDoublePlay", "groundIntoTriplePlay", "plateAppearances", "totalBases", "rbi", "leftOnBase", "sacBunts", "sacFlies", "catchersInterference", "pickoffs", "gamesPlayedPitching", "gamesStartedPitching", "completeGamesPitching", "shutoutsPitching", "winsPitching", "lossesPitching", "flyOutsPitching", "airOutsPitching", "groundOutsPitching", "runsPitching", "doublesPitching", "triplesPitching", "homeRunsPitching", "strikeOutsPitching", "baseOnBallsPitching", "intentionalWalksPitching", "hitsPitching", "hitByPitchPitching", "atBatsPitching", "caughtStealingPitching", "stolenBasesPitching", "inningsPitched", "saveOpportunities", "earnedRuns", "battersFaced", "outsPitching", "pitchesThrown", "balls", "strikes", "hitBatsmen", "balks", "wildPitches", "pickoffsPitching", "rbiPitching", "gamesFinishedPitching", "inheritedRunners", "inheritedRunnersScored", "catchersInterferencePitching", "sacBuntsPitching", "sacFliesPitching", "saves", "holds", "blownSaves", "assists", "putOuts", "errors", "chances", "divisionRank", "leagueRank", "totalWins_onTHEseason", "totalLosses_onTHEseason", "homeWins_onTHEseason", "homeLosses_onTHEseason", "awayWins_onTHEseason", "awayLosses_onTHEseason", "positionName", "positionType", ] df_X = [ "playerId", "teamId", "status", "seriesDescription", "Current_winning_percentage", "Score", "home", "jerseyNum", "positionCode", "positionName", "positionType", "battingOrder", "gamesPlayedBatting", "flyOuts", "groundOuts", "runsScored", "doubles", "triples", "homeRuns", "strikeOuts", "baseOnBalls", "intentionalWalks", "hits", "hitByPitch", "atBats", "caughtStealing", "stolenBases", "groundIntoDoublePlay", "groundIntoTriplePlay", "plateAppearances", "totalBases", "rbi", "leftOnBase", "sacBunts", "sacFlies", "catchersInterference", "pickoffs", "gamesPlayedPitching", "gamesStartedPitching", "completeGamesPitching", "shutoutsPitching", "winsPitching", "lossesPitching", "flyOutsPitching", "airOutsPitching", "groundOutsPitching", "runsPitching", "doublesPitching", "triplesPitching", "homeRunsPitching", "strikeOutsPitching", "baseOnBallsPitching", "intentionalWalksPitching", "hitsPitching", "hitByPitchPitching", "atBatsPitching", "caughtStealingPitching", "stolenBasesPitching", "inningsPitched", "saveOpportunities", "earnedRuns", "battersFaced", "outsPitching", "pitchesThrown", "balls", "strikes", "hitBatsmen", "balks", "wildPitches", "pickoffsPitching", "rbiPitching", "gamesFinishedPitching", "inheritedRunners", "inheritedRunnersScored", "catchersInterferencePitching", "sacBuntsPitching", "sacFliesPitching", "saves", "holds", "blownSaves", "assists", "putOuts", "errors", "chances", "divisionRank", "leagueRank", "totalWins_onTHEseason", "totalLosses_onTHEseason", "homeWins_onTHEseason", "homeLosses_onTHEseason", "awayWins_onTHEseason", "awayLosses_onTHEseason", "awardId", "player_twitter", "team_twitter", ] set(X_columns) == set(df_X) def preprocess(data, test_y): # preprocess submission y merge_start = test_y.copy() merge_start.drop(columns=["date_playerId"], 
inplace=True) merge_start["engagementMetricsDate"] = merge_start["date"].copy() merge_start["date"] = pd.to_datetime(merge_start["date"], format="%Y%m%d") merge_start["date"] = pd.DatetimeIndex(merge_start["date"]) - pd.DateOffset(1) merge_start["date"] = merge_start["date"].astype(str) merge_start["engagementMetricsDate"] = pd.to_datetime( merge_start["engagementMetricsDate"], format="%Y%m%d" ) merge_start["engagementMetricsDate"] = merge_start["engagementMetricsDate"].astype( str ) rosters = merge_nested_form(data, "rosters") # rosters 原本一共有6列: # playerId gameDate teamId statusCode status # 现在仅保留有用的4列 rosters = rosters[["playerId", "gameDate", "teamId", "status"]] # 并且 pd.isnull(train['rosters']).sum() 的结果为0,也就表示,train['rosters'] 这一列每一行都有 value # 所以,gameDate 应该就是 date rosters.rename(columns={"gameDate": "date"}, inplace=True) # playerTwitterFollowers = merge_nested_form(data, 'playerTwitterFollowers') # print(playerTwitterFollowers) # # playerTwitterFollowers 原本一共有6列: # # date playerId playerName accountName twitterHandle numberOfFollowers # # 现在仅保留有用的3列 # playerTwitterFollowers = playerTwitterFollowers[['date','playerId','numberOfFollowers']] # # 嵌套表格以'date'为列名称的,数据格式好像全部都是 datetime,需要全部转换成 str # # 比如 transactions,playerTwitterFollowers,teamTwitterFollowers 表 # # 但是以'gameDate'为列名称的,数据格式好像全部都是 str # playerTwitterFollowers['date'] = playerTwitterFollowers['date'].astype(str) # playerTwitterFollowers.rename(columns={'numberOfFollowers':'player_twitter'},inplace=True) # teamTwitterFollowers = merge_nested_form(data, 'teamTwitterFollowers') # # teamTwitterFollowers 原本一共有6列: # # date teamId teamName accountName twitterHandle numberOfFollowers # # 现在仅保留有用的3列 # teamTwitterFollowers = teamTwitterFollowers[['date','teamId','numberOfFollowers']] # # 嵌套表格以'date'为列名称的,数据格式好像全部都是 datetime,需要全部转换成 str # # 比如 transactions,playerTwitterFollowers,teamTwitterFollowers 表 # # 但是以'gameDate'为列名称的,数据格式好像全部都是 str # teamTwitterFollowers['date'] = teamTwitterFollowers['date'].astype(str) # teamTwitterFollowers.rename(columns={'numberOfFollowers':'team_twitter'},inplace=True) games = merge_nested_form(data, "games") # gameType """ [{"id":"S","description":"Spring Training"}, {"id":"R","description":"Regular Season"}, {"id":"F","description":"Wild Card Game"}, {"id":"D","description":"Division Series"}, {"id":"L","description":"League Championship Series"}, {"id":"W","description":"World Series"}, {"id":"C","description":"Championship"}, {"id":"N","description":"Nineteenth Century Series"}, {"id":"P","description":"Playoffs"}, {"id":"A","description":"All-Star Game"}, {"id":"I","description":"Intrasquad"}, {"id":"E","description":"Exhibition"}] """ # 只有10场比赛存在resume,所以相关变量可以删除了 # 只有116场比赛 istie 那一列是1,可以删除 # 改一下日期的 column name,方便后面 merge games.rename(columns={"gameDate": "date"}, inplace=True) # 把 game 表格分程两部分,主场球队的信息,和客场球队的信息 # 两部分都包括比赛的编号和类型 # 仅摘取我们认为比较有用的信息 games_home = games[ ["seriesDescription", "date", "homeId", "homeWinPct", "homeScore"] ] games_away = games[ ["seriesDescription", "date", "awayId", "awayWinPct", "awayScore"] ] # 创建一个 feature 用于判断是主场还是客场 games_home["home"] = pd.Series() games_home["home"] = games_home["home"].fillna(1).astype(int) games_away["home"] = pd.Series() games_away["home"] = games_away["home"].fillna(0).astype(int) # 为了后面主客场两站表的融合,改一下 columns name games_home.rename( columns={ "homeId": "teamId", "homeWinner": "Win", "homeWinPct": "Current_winning_percentage", "homeScore": "Score", }, inplace=True, ) games_away.rename( columns={ "awayId": "teamId", "awayWinner": "Win", "awayWinPct": 
"Current_winning_percentage", "awayScore": "Score", }, inplace=True, ) games_new = pd.concat([games_home, games_away], axis=0) index = pd.Series(list(range(0, games_new.shape[0]))) # 给整张表重置一下index games_new = games_new.set_index(index) playerBoxScores = merge_nested_form(data, "playerBoxScores") # 改一下日期的 column name,方便后面 merge playerBoxScores.rename(columns={"gameDate": "date"}, inplace=True) # 因为这张表计划大部分内容保留,所以只把没用的删除就好 playerBoxScores.drop( columns=["gameTimeUTC", "teamName", "playerName", "home", "gamePk"], inplace=True, ) teamBoxScores = merge_nested_form(data, "teamBoxScores") # 改一下日期的 column name,方便后面 merge teamBoxScores.rename(columns={"gameDate": "date"}, inplace=True) teamBoxScores.drop(["home", "gamePk"], axis=1, inplace=True) standings = merge_nested_form(data, "standings") # 改一下日期的 column name,方便后面 merge standings.rename(columns={"gameDate": "date"}, inplace=True) standings_new = standings[ [ "date", "teamId", "divisionRank", "leagueRank", "wins", "losses", "homeWins", "homeLosses", "awayWins", "awayLosses", ] ] standings_new.rename( columns={ "wins": "totalWins_onTHEseason", "losses": "totalLosses_onTHEseason", "homeWins": "homeWins_onTHEseason", "homeLosses": "homeLosses_onTHEseason", "awayWins": "awayWins_onTHEseason", "awayLosses": "awayLosses_onTHEseason", }, inplace=True, ) awards = merge_nested_form(data, "awards") # awards['awardId'].unique().shape[0] = 399 # 一共有399种不同的奖项 # 改一下日期的 column name,方便后面 merge awards.rename(columns={"awardDate": "date"}, inplace=True) # 仅保留有用的几列 if awards.empty: awards["awardId"] = np.nan awards["date"] = merge_start["date"] awards["playerId"] = merge_start["playerId"] else: awards = awards[["awardId", "date", "playerId"]] # transactions = merge_nested_form(data, 'transactions') # print("transactions:", transactions) # # transactions['typeDesc'].unique().shape[0] = 16 # # 一共有16种不同的 transactions # # 嵌套表格以'date'为列名称的,数据格式好像全部都是 datetime,需要全部转换成 str # # 比如 transactions,playerTwitterFollowers,teamTwitterFollowers 表 # # 但是以'gameDate'为列名称的,数据格式好像全部都是 str # transactions['date'] = transactions['date'].astype(str) # # 有375行里,playerId 是 nan,就尼玛离谱,需要全部删除,然后再重置一下 index # delete_index = transactions.loc[transactions['playerId'].isnull()==True].index # transactions.drop(index = delete_index, inplace = True) # reset_index = pd.Series(list(range(0,transactions.shape[0]))) # 给整张表重置一下index # transactions = transactions.set_index(reset_index) # # 这个 playerId 默认有小数点就你妈离谱 # transactions['playerId'] = transactions['playerId'].astype(int) # # transactions['toTeamId'].isnull().sum()==0 # # 我们可以认为,没有球员被解聘,只存在两种情况:从别的队挖人or招募新人 # transactions.drop(columns=['transactionId','playerName','fromTeamName','toTeamName','typeCode','description'],inplace=True) games_new_new = unique_record(games_new, "date", "teamId", "date_teamId") games_new_new.drop(columns="date_teamId", inplace=True) games_new_new["teamId"] = games_new_new["teamId"].astype(int) playerBoxScores_new = unique_record( playerBoxScores, "date", "playerId", "date_playerId" ) playerBoxScores_new.drop(columns="date_playerId", inplace=True) playerBoxScores_new["playerId"] = playerBoxScores_new["playerId"].astype(int) teamBoxScores_new = unique_record(teamBoxScores, "date", "teamId", "date_teamId") teamBoxScores_new.drop(columns="date_teamId", inplace=True) teamBoxScores_new["teamId"] = playerBoxScores_new["teamId"].astype(int) awards_new = unique_record(awards, "date", "playerId", "date_playerId") awards_new.drop(columns="date_playerId", inplace=True) awards_new["playerId"] = awards_new["playerId"].astype(int) 
gc.collect() # merge all merge_rosters = merge_two_nested_form( merge_start, rosters, key=["playerId", "date"] ) gc.collect() merge_games = merge_two_nested_form( merge_rosters, games_new_new, key=["teamId", "date"] ) gc.collect() merge_PScores = merge_two_nested_form( merge_games, playerBoxScores_new, key=["playerId", "teamId", "date"] ) gc.collect() merge_standings = merge_two_nested_form( merge_PScores, standings_new, key=["teamId", "date"] ) gc.collect() merge_awards = merge_two_nested_form( merge_standings, awards_new, key=["playerId", "date"] ) gc.collect() merge_final = merge_awards.copy() gc.collect() merge = merge_final merge["date"] = pd.to_datetime(merge["date"], format="%Y-%m-%d") merge["engagementMetricsDate"] = pd.to_datetime( merge["engagementMetricsDate"], format="%Y-%m-%d" ) df = merge.copy() df["playerId"] = df["playerId"].astype("str") df["playerId"] = df["playerId"].map(encode_dict["playerId"]) df["teamId"] = df["teamId"].apply(trim_fraction) df["teamId"] = df["teamId"].map(encode_dict["teamId"]) df["status"] = df["status"].astype("str") df["status"] = df["status"].map(encode_dict["status"]) df["seriesDescription"] = df["seriesDescription"].astype("str") df["seriesDescription"] = df["seriesDescription"].map( encode_dict["seriesDescription"] ) df["positionName"] = df["positionName"].astype("str") df["positionName"] = df["positionName"].map(encode_dict["positionName"]) df["positionType"] = df["positionType"].astype("str") df["positionType"] = df["positionType"].map(encode_dict["positionType"]) df["awardId"] = df["awardId"].astype("str") df["awardId"] = df["awardId"].map(encode_dict["awardId"]) # df.playerId.update(pd.Series(encode_dict["playerId"])) df["player_twitter"] = df["playerId"].map(ptt_dict) df["team_twitter"] = df["playerId"].map(ttt_dict) # dict_keys(['playerId', 'teamId', 'status', 'seriesDescription', 'positionName', 'positionType', 'awardId']) df = df.fillna(-1) df.drop( columns=[ "target1", "target2", "target3", "target4", "engagementMetricsDate", "date", ], inplace=True, ) pd.set_option("display.max_columns", None) # print(df.columns) df = df[X_columns] # print("df",df) return df env = mlb.make_env() # initialize the environment iter_test = env.iter_test() # iterator which loops over each date in test set test_data = pd.DataFrame() sub_sample = pd.DataFrame() # for (test_df, sample_prediction_df) in iter_test: # make predictions here # sample_prediction_df = sample_prediction_df.reset_index(drop=True) # # creat dataset # sample_prediction_df['playerId'] = sample_prediction_df['date_playerId']\ # .map(lambda x: int(x.split('_')[1])) # sample_prediction_df['date'] = sample_prediction_df['date_playerId']\ # .map(lambda x: int(x.split('_')[0])) # print(sample_prediction_df) for test_df, sample_prediction_df in iter_test: # make predictions here sample_prediction_df = sample_prediction_df.reset_index(drop=True) # creat dataset sample_prediction_df["playerId"] = sample_prediction_df["date_playerId"].map( lambda x: int(x.split("_")[1]) ) sample_prediction_df["date"] = sample_prediction_df["date_playerId"].map( lambda x: int(x.split("_")[0]) ) # Dealing with missing values test_X = preprocess(test_df, sample_prediction_df) # test_data = pd.concat([test_data,test_df], axis=0) # sub_sample = pd.concat([sub_sample,sample_prediction_df], axis=0) # env.predict(sample_prediction_df) # # predict pred1 = model1.predict(test_X) pred2 = model2.predict(test_X) pred3 = model3.predict(test_X) pred4 = model4.predict(test_X) # merge submission sample_prediction_df["target1"] = 
np.clip(pred1, 0, 100) sample_prediction_df["target2"] = np.clip(pred2, 0, 100) sample_prediction_df["target3"] = np.clip(pred3, 0, 100) sample_prediction_df["target4"] = np.clip(pred4, 0, 100) sample_prediction_df = sample_prediction_df.fillna(0.0) # sample_prediction_df["date"] = sample_prediction_df["date"].replace({"-":""}, regex=True) # sample_prediction_df["playerId"] = sample_prediction_df["playerId"].astype(str) # sample_prediction_df["date_playerId"] = sample_prediction_df["date"]+"_"+sample_prediction_df["playerId"] del sample_prediction_df["playerId"] del sample_prediction_df["date"] # sample_prediction_df = sample_prediction_df[["date_playerId","target1","target2","target3","target4"]] final_headers = ["date_playerId", "target1", "target2", "target3", "target4"] sample_prediction_df = sample_prediction_df[final_headers].copy() # sample_prediction_df["target1"] = sample_prediction_df["target1"].round(6) # sample_prediction_df["target2"] = sample_prediction_df["target2"].round(6) # sample_prediction_df["target3"] = sample_prediction_df["target3"].round(6) # sample_prediction_df["target4"] = sample_prediction_df["target4"].round(6) env.predict(sample_prediction_df) # test_data.to_csv("test_data2.csv",index = False) # sub_sample.to_csv("sub_sample.csv", index = False) sub = pd.read_csv("./submission.csv") sub # sub.isnull().sum() # sub.dtypes # tmpsub # for i, v in sub.iterrows(): # tmpsub.loc[tmpsub["date_playerId"]==v['date_playerId'],"target1"] = v["target1"] # tmpsub.loc[tmpsub["date_playerId"]==v['date_playerId'],"target2"] = v["target2"] # tmpsub.loc[tmpsub["date_playerId"]==v['date_playerId'],"target3"] = v["target3"] # tmpsub.loc[tmpsub["date_playerId"]==v['date_playerId'],"target4"] = v["target4"] # tmpsub # tmpsub.to_csv('./submission.csv')
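# The target_encode helper defined above (its inline comments are in Chinese) replaces
# each category with a smoothed mean of the target: (sum(y in category) + a * p) / (count + a),
# where p acts as a prior mean and a controls how strongly rare categories shrink toward it;
# with the defaults a=0, p=0 it reduces to the plain per-category mean. A standalone sketch
# on made-up data -- the column names and numbers below are illustrative only.
import numpy as np
import pandas as pd

toy = pd.DataFrame(
    {"team": ["A", "A", "B", "B", "B", "C"], "y": [10, 20, 5, 7, 9, 40]}
)
a, p = 5, toy["y"].mean()  # shrinkage weight and global prior

toy["team_target_enc"] = toy.groupby("team")["y"].transform(
    lambda x: (np.sum(x) + a * p) / (x.count() + a)  # same formula as target_encode above
)
print(toy)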
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/444/69444421.ipynb
new-model
aicssu
[{"Id": 69444421, "ScriptId": 18800100, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5518723, "CreationDate": "07/31/2021 05:20:18", "VersionNumber": 21.0, "Title": "csFinal", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 574.0, "LinesInsertedFromPrevious": 17.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 557.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92620728, "KernelVersionId": 69444421, "SourceDatasetVersionId": 2477099}]
[{"Id": 2477099, "DatasetId": 1499116, "DatasourceVersionId": 2519617, "CreatorUserId": 5518723, "LicenseName": "Unknown", "CreationDate": "07/29/2021 15:21:17", "VersionNumber": 1.0, "Title": "new_model", "Slug": "new-model", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1499116, "CreatorUserId": 5518723, "OwnerUserId": 5518723.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2477099.0, "CurrentDatasourceVersionId": 2519617.0, "ForumId": 1518845, "Type": 2, "CreationDate": "07/29/2021 15:21:17", "LastActivityDate": "07/29/2021", "TotalViews": 718, "TotalDownloads": 3, "TotalVotes": 0, "TotalKernels": 2}]
[{"Id": 5518723, "UserName": "aicssu", "DisplayName": "AICSSU", "RegisterDate": "07/24/2020", "PerformanceTier": 1}]
false
0
8,669
0
8,689
8,669
69913748
<jupyter_start><jupyter_text>Computer Vision Unsaac Images Kaggle dataset identifier: computer-vision-unsaac-images <jupyter_script># # TAREA Detección de Bordes de Canny # ``` # Autor : Jorge Andre Salcedo Hurtado # Proposito : Detección de Bordes de Canny # Problema : Utilizar el codigo fuente y la imagen brindada para detectar los bordes por Canny # ``` # # Librerias utilizadas import cv2 as cv import numpy as np from scipy import ndimage from scipy.ndimage.filters import convolve # # Clase Detector de bordes Canny class cannyEdgeDetector: def __init__( self, imgs, sigma=1, kernel_size=5, weak_pixel=75, strong_pixel=255, lowthreshold=0.05, highthreshold=0.15, ): self.imgs = imgs self.imgs_final = [] self.img_smoothed = None self.gradientMat = None self.thetaMat = None self.nonMaxImg = None self.thresholdImg = None self.weak_pixel = weak_pixel self.strong_pixel = strong_pixel self.sigma = sigma self.kernel_size = kernel_size self.lowThreshold = lowthreshold self.highThreshold = highthreshold return def gaussian_kernel(self, size, sigma=1): size = int(size) // 2 x, y = np.mgrid[-size : size + 1, -size : size + 1] normal = 1 / (2.0 * np.pi * sigma**2) g = np.exp(-((x**2 + y**2) / (2.0 * sigma**2))) * normal return g def sobel_filters(self, img): Kx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32) Ky = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], np.float32) Ix = ndimage.filters.convolve(img, Kx) Iy = ndimage.filters.convolve(img, Ky) G = np.hypot(Ix, Iy) G = G / G.max() * 255 theta = np.arctan2(Iy, Ix) return (G, theta, [Ix, Iy]) def non_max_suppression(self, img, D): M, N = img.shape Z = np.zeros((M, N), dtype=np.int32) angle = D * 180.0 / np.pi angle[angle < 0] += 180 for i in range(1, M - 1): for j in range(1, N - 1): try: q = 255 r = 255 # angle 0 if (0 <= angle[i, j] < 22.5) or (157.5 <= angle[i, j] <= 180): q = img[i, j + 1] r = img[i, j - 1] # angle 45 elif 22.5 <= angle[i, j] < 67.5: q = img[i + 1, j - 1] r = img[i - 1, j + 1] # angle 90 elif 67.5 <= angle[i, j] < 112.5: q = img[i + 1, j] r = img[i - 1, j] # angle 135 elif 112.5 <= angle[i, j] < 157.5: q = img[i - 1, j - 1] r = img[i + 1, j + 1] if (img[i, j] >= q) and (img[i, j] >= r): Z[i, j] = img[i, j] else: Z[i, j] = 0 except IndexError as e: pass return Z def threshold(self, img): highThreshold = img.max() * self.highThreshold lowThreshold = highThreshold * self.lowThreshold M, N = img.shape res = np.zeros((M, N), dtype=np.int32) weak = np.int32(self.weak_pixel) strong = np.int32(self.strong_pixel) strong_i, strong_j = np.where(img >= highThreshold) zeros_i, zeros_j = np.where(img < lowThreshold) weak_i, weak_j = np.where((img <= highThreshold) & (img >= lowThreshold)) res[strong_i, strong_j] = strong res[weak_i, weak_j] = weak return res def hysteresis(self, img): M, N = img.shape weak = self.weak_pixel strong = self.strong_pixel for i in range(1, M - 1): for j in range(1, N - 1): if img[i, j] == weak: try: if ( (img[i + 1, j - 1] == strong) or (img[i + 1, j] == strong) or (img[i + 1, j + 1] == strong) or (img[i, j - 1] == strong) or (img[i, j + 1] == strong) or (img[i - 1, j - 1] == strong) or (img[i - 1, j] == strong) or (img[i - 1, j + 1] == strong) ): img[i, j] = strong else: img[i, j] = 0 except IndexError as e: pass return img def detect(self): imgs_final = [] for i, img in enumerate(self.imgs): self.img_smoothed = convolve( img, self.gaussian_kernel(self.kernel_size, self.sigma) ) self.gradientMat, self.thetaMat = self.sobel_filters(self.img_smoothed) self.nonMaxImg = 
self.non_max_suppression(self.gradientMat, self.thetaMat)
            self.thresholdImg = self.threshold(self.nonMaxImg)
            img_final = self.hysteresis(self.thresholdImg)
            self.imgs_final.append(img_final)

        return self.imgs_final


# # 1. Data loading
img = cv.imread("../input/computer-vision-unsaac-images/4.jpg")
# convert to grayscale
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
# show the image
plot(gray, "Escala de grises")
# initialize our Canny edge detector class
canny = cannyEdgeDetector(
    gray, kernel_size=5, lowthreshold=0.12, highthreshold=0.07, weak_pixel=100
)

# # 2. Smoothing
suavizado = canny.gaussian_kernel(6)
canny.img_smoothed = convolve(canny.imgs, suavizado)
# show the image
plot(canny.img_smoothed, "Imagen suavizada")

# # 3. Gradient
canny.gradientMat, canny.thetaMat, Sobel = canny.sobel_filters(canny.img_smoothed)
# show the image
plot(canny.gradientMat, "Gradiente")

# ## Sobel X
plot(Sobel[0], "Sobel X")

# ## Sobel Y
plot(Sobel[1], "Sobel Y")

# # 4. Non-maximum suppression
canny.nonMaxImg = canny.non_max_suppression(canny.gradientMat, canny.thetaMat)
# show the image
plot(canny.nonMaxImg, "Supresion no maxima")

# # 5. Double threshold
canny.thresholdImg = canny.threshold(canny.nonMaxImg)
# show the image
plot(canny.thresholdImg, "Umbral Doble")

# # 6. Final step: hysteresis
img_final = canny.hysteresis(canny.thresholdImg)
# show the image
plot(img_final, "Histeresis")
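# NOTE: the notebook above calls plot(...) throughout but never defines it in the
# extracted source. A minimal sketch of such a helper, assuming matplotlib and a
# single-channel image array (the name `plot` and its signature are assumptions,
# not part of the original code):
import matplotlib.pyplot as plt


def plot(image, title=""):
    # Display a grayscale image with a title and hidden axes.
    plt.figure(figsize=(8, 6))
    plt.imshow(image, cmap="gray")
    plt.title(title)
    plt.axis("off")
    plt.show()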
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/913/69913748.ipynb
computer-vision-unsaac-images
andremsh
[{"Id": 69913748, "ScriptId": 19115604, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6400344, "CreationDate": "08/03/2021 20:20:06", "VersionNumber": 2.0, "Title": "Salcedo Hurtado, Deteccion de Bordes de Canny", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 205.0, "LinesInsertedFromPrevious": 10.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 195.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 93333934, "KernelVersionId": 69913748, "SourceDatasetVersionId": 2496112}]
[{"Id": 2496112, "DatasetId": 1511276, "DatasourceVersionId": 2538734, "CreatorUserId": 6400344, "LicenseName": "Unknown", "CreationDate": "08/03/2021 19:25:10", "VersionNumber": 1.0, "Title": "Computer Vision Unsaac Images", "Slug": "computer-vision-unsaac-images", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1511276, "CreatorUserId": 6400344, "OwnerUserId": 6400344.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2496112.0, "CurrentDatasourceVersionId": 2538734.0, "ForumId": 1531038, "Type": 2, "CreationDate": "08/03/2021 19:25:10", "LastActivityDate": "08/03/2021", "TotalViews": 653, "TotalDownloads": 5, "TotalVotes": 0, "TotalKernels": 2}]
[{"Id": 6400344, "UserName": "andremsh", "DisplayName": "JORGE ANDRE SALCEDO HURTADO", "RegisterDate": "12/18/2020", "PerformanceTier": 0}]
# # TAREA Detección de Bordes de Canny # ``` # Autor : Jorge Andre Salcedo Hurtado # Proposito : Detección de Bordes de Canny # Problema : Utilizar el codigo fuente y la imagen brindada para detectar los bordes por Canny # ``` # # Librerias utilizadas import cv2 as cv import numpy as np from scipy import ndimage from scipy.ndimage.filters import convolve # # Clase Detector de bordes Canny class cannyEdgeDetector: def __init__( self, imgs, sigma=1, kernel_size=5, weak_pixel=75, strong_pixel=255, lowthreshold=0.05, highthreshold=0.15, ): self.imgs = imgs self.imgs_final = [] self.img_smoothed = None self.gradientMat = None self.thetaMat = None self.nonMaxImg = None self.thresholdImg = None self.weak_pixel = weak_pixel self.strong_pixel = strong_pixel self.sigma = sigma self.kernel_size = kernel_size self.lowThreshold = lowthreshold self.highThreshold = highthreshold return def gaussian_kernel(self, size, sigma=1): size = int(size) // 2 x, y = np.mgrid[-size : size + 1, -size : size + 1] normal = 1 / (2.0 * np.pi * sigma**2) g = np.exp(-((x**2 + y**2) / (2.0 * sigma**2))) * normal return g def sobel_filters(self, img): Kx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32) Ky = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], np.float32) Ix = ndimage.filters.convolve(img, Kx) Iy = ndimage.filters.convolve(img, Ky) G = np.hypot(Ix, Iy) G = G / G.max() * 255 theta = np.arctan2(Iy, Ix) return (G, theta, [Ix, Iy]) def non_max_suppression(self, img, D): M, N = img.shape Z = np.zeros((M, N), dtype=np.int32) angle = D * 180.0 / np.pi angle[angle < 0] += 180 for i in range(1, M - 1): for j in range(1, N - 1): try: q = 255 r = 255 # angle 0 if (0 <= angle[i, j] < 22.5) or (157.5 <= angle[i, j] <= 180): q = img[i, j + 1] r = img[i, j - 1] # angle 45 elif 22.5 <= angle[i, j] < 67.5: q = img[i + 1, j - 1] r = img[i - 1, j + 1] # angle 90 elif 67.5 <= angle[i, j] < 112.5: q = img[i + 1, j] r = img[i - 1, j] # angle 135 elif 112.5 <= angle[i, j] < 157.5: q = img[i - 1, j - 1] r = img[i + 1, j + 1] if (img[i, j] >= q) and (img[i, j] >= r): Z[i, j] = img[i, j] else: Z[i, j] = 0 except IndexError as e: pass return Z def threshold(self, img): highThreshold = img.max() * self.highThreshold lowThreshold = highThreshold * self.lowThreshold M, N = img.shape res = np.zeros((M, N), dtype=np.int32) weak = np.int32(self.weak_pixel) strong = np.int32(self.strong_pixel) strong_i, strong_j = np.where(img >= highThreshold) zeros_i, zeros_j = np.where(img < lowThreshold) weak_i, weak_j = np.where((img <= highThreshold) & (img >= lowThreshold)) res[strong_i, strong_j] = strong res[weak_i, weak_j] = weak return res def hysteresis(self, img): M, N = img.shape weak = self.weak_pixel strong = self.strong_pixel for i in range(1, M - 1): for j in range(1, N - 1): if img[i, j] == weak: try: if ( (img[i + 1, j - 1] == strong) or (img[i + 1, j] == strong) or (img[i + 1, j + 1] == strong) or (img[i, j - 1] == strong) or (img[i, j + 1] == strong) or (img[i - 1, j - 1] == strong) or (img[i - 1, j] == strong) or (img[i - 1, j + 1] == strong) ): img[i, j] = strong else: img[i, j] = 0 except IndexError as e: pass return img def detect(self): imgs_final = [] for i, img in enumerate(self.imgs): self.img_smoothed = convolve( img, self.gaussian_kernel(self.kernel_size, self.sigma) ) self.gradientMat, self.thetaMat = self.sobel_filters(self.img_smoothed) self.nonMaxImg = self.non_max_suppression(self.gradientMat, self.thetaMat) self.thresholdImg = self.threshold(self.nonMaxImg) img_final = self.hysteresis(self.thresholdImg) 
self.imgs_final.append(img_final) return self.imgs_final # # 1. Lectura de datos img = cv.imread("../input/computer-vision-unsaac-images/4.jpg") # convertimos a escala de grises r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2] gray = 0.2989 * r + 0.5870 * g + 0.1140 * b # mostramos la imagen plot(gray, "Escala de grises") # inicializamos nuestra clase detector de bordes Canny canny = cannyEdgeDetector( gray, kernel_size=5, lowthreshold=0.12, highthreshold=0.07, weak_pixel=100 ) # # 2. Suavizado suavizado = canny.gaussian_kernel(6) canny.img_smoothed = convolve(canny.imgs, suavizado) # mostramos la imagen plot(canny.img_smoothed, "Imagen suavizada") # # 3. Gradiente canny.gradientMat, canny.thetaMat, Sobel = canny.sobel_filters(canny.img_smoothed) # mostramos la imagen plot(canny.gradientMat, "Gradiente") # ## Sobel X plot(Sobel[0], "Sobel X") # ## Sobel Y plot(Sobel[1], "Sobel Y") # # 4. Supresion no maxima canny.nonMaxImg = canny.non_max_suppression(canny.gradientMat, canny.thetaMat) # mostramos la imagen plot(canny.nonMaxImg, "Supresion no maxima") # # 4. Doble Umbral canny.thresholdImg = canny.threshold(canny.nonMaxImg) # mostramos la imagen plot(canny.thresholdImg, "Umbral Doble") # # 6. Metodo final Histeresis img_final = canny.hysteresis(canny.thresholdImg) # mostramos la imagen plot(img_final, "Histeresis")
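# For reference, the step-by-step calls above mirror what cannyEdgeDetector.detect()
# does internally. A sketch of the end-to-end call (an assumption, not part of the
# original notebook): note that the constructor receives a *list* of grayscale
# images here, because detect() iterates over self.imgs.
detector = cannyEdgeDetector(
    [gray], sigma=1, kernel_size=5, lowthreshold=0.05, highthreshold=0.15
)
edges_list = detector.detect()  # one processed edge image per input image
edges = edges_list[0]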
false
0
2,116
0
2,144
2,116
69913565
<jupyter_start><jupyter_text>MQTTset
# MQTTset, a new dataset for MQTT
The proposed work aims to create a dataset for the IoT context, in particular for the MQTT communication protocol, in order to give the research and industrial community an initial dataset to use in their applications. The dataset is composed of IoT sensors based on MQTT, where every aspect of a real network is defined. In particular, the MQTT broker is instantiated using Eclipse Mosquitto and the network is composed of 8 sensors. The scenario is a smart home environment where sensors retrieve information about temperature, light, humidity, CO-Gas, motion, smoke, door and fan at different time intervals, since the behaviour of each sensor differs from the others.
## Getting Started
In order to use the dataset, simply download the github repository and start working with the CSV or PCAP files. More information is available at the following page: [Vaccari, I.; Chiola, G.; Aiello, M.; Mongelli, M.; Cambiaso, E. MQTTset, a New Dataset for Machine Learning Techniques on MQTT. Sensors 2020, 20, 6578](https://www.mdpi.com/1424-8220/20/22/6578/htm)
If you use this dataset in a research work, please cite this article.
### MQTT network structure
As mentioned, the dataset is composed of 8 MQTT sensors with different features. The MQTT sensors are reported in the table below. Each sensor is associated with a data profile and a topic linked to the MQTT broker. The data profile is the type of data that the sensor communicates, while the topic is defined by the sensor when sending data to the broker. Finally, the sensors were conceptually divided into two rooms, as if they were distributed in a smart house, and the MQTT broker has 10.16.100.73 as IP address with 1883 as clear-text communication port. In the table, the timing can be periodic or random. This distinction matters because a temperature sensor has a periodic behavior over time, i.e. it cyclically sends information retrieved from the environment (defined as P). A motion sensor, instead, has a more random behavior, since it sends information only when a user passes in front of the sensor (defined as R). By also analyzing this aspect, the dataset is even more realistic, as the actual behavior of a home automation system is simulated and implemented.
Sensor | IP address | Room | Time (P:periodic, R:random) | Topic | Data Profile
--- | --- | --- | --- | --- | ---
Temperature | 192.168.0.151 | 1 | P, 60 s | Temperature | Temperature
Light intensity | 192.168.0.150 | 1 | P, 1800 s | Light intensity | Light intensity
Humidity | 192.168.0.152 | 1 | P, 60 s | Humidity | Humidity
Motion sensor | 192.168.0.154 | 1 | R, 1 h | Motion sensor | Motion sensor
CO-Gas | 192.168.0.155 | 1 | R, 1 h | CO-Gas | CO-Gas
Smoke | 192.168.0.180 | 2 | R, 1 h | Smoke | Smoke
Fan speed controller | 192.168.0.173 | 2 | P, 120 s | Fan speed controller | Fan speed controller
Door lock | 192.168.0.176 | 2 | R, 1 h | Door lock | Door lock
Fan sensor | 192.168.0.178 | 2 | P, 60 s | Fan sensor | Fan sensor
Motion sensor | 192.168.0.174 | 2 | R, 1 h | Motion sensor | Motion sensor
### Github repository organization
The repository is composed of 3 folders:
* PCAP raw data
 * Legitimate
 * SlowITe
 * Bruteforce
 * Malformed data
 * Flooding
 * DoS attack
* CSV file
 * Legitimate
 * SlowITe
 * Bruteforce
 * Malformed data
 * Flooding
 * DoS attack
* Final dataset
 * train70.csv, test30.csv
 * train70_reduced.csv, test30_reduced.csv
 * train70_augmented.csv, test30_augmented.csv
The PCAP folder contains the raw network data recovered directly from the sensors of the MQTT network, together with the traffic related to the attacks. The CSV folder instead contains the data and features extracted from the PCAP files using the tshark tool. Finally, the FINAL_CSV folder contains the CSV files combined with each other and subsequently used for the machine learning algorithms. In particular, the CSV files come in 3 different formats:
* train70.csv, test30.csv: in these files, the legitimate traffic was randomly combined with the different malicious traffic.
* train70_reduced.csv, test30_reduced.csv: the reduced form combines malicious and legitimate traffic in a 50:50 split, so there is less legitimate traffic than in reality. The legitimate traffic equals the sum of the malicious traffic.
* train70_augmented.csv, test30_augmented.csv: in the augmented form, however, the malicious traffic has been increased so that the sum of the traffic related to the attacks is equal to the legitimate traffic.
## Installation In order to use the dataset combined with the proposed code, run the following command to install python3 libraries: ``` pip3 install -r requirements.txt ``` ## Built With * [IoT-Flock](https://github.com/ThingzDefense/IoT-Flock) - Framework to generate IoT networks * [MQTTSA](https://github.com/stfbk/mqttsa) - A security assessment tool for MQTT networks ## Authors * **Ivan Vaccari** - *Concept, implementation, elaboration, paper writer* - [Profile](https://www.ieiit.cnr.it/people/Vaccari-Ivan) * **Giovanni Chiola** - *Dataset approach and definitiom* - [Profile](https://www.dibris.unige.it/chiola-giovanni) * **Maurizio Aiello** - *Supervisor, paper reviewer* - [Profile](https://www.ieiit.cnr.it/people/Aiello-Maurizio) * **Maurizio Mongelli** - *Machine learning support and contribution* - [Profile](https://www.ieiit.cnr.it/people/Mongelli-Maurizio) * **Enrico Cambiaso** - *Supervisor, elaboration, paper collaboration* - [Profile](https://www.ieiit.cnr.it/people/Cambiaso-Enrico) Kaggle dataset identifier: mqttset <jupyter_code>import pandas as pd df = pd.read_csv('mqttset/Data/FINAL_CSV/train70_reduced.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 231646 entries, 0 to 231645 Data columns (total 34 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 tcp.flags 231646 non-null object 1 tcp.time_delta 231646 non-null float64 2 tcp.len 231646 non-null int64 3 mqtt.conack.flags 231646 non-null object 4 mqtt.conack.flags.reserved 231646 non-null float64 5 mqtt.conack.flags.sp 231646 non-null float64 6 mqtt.conack.val 231646 non-null float64 7 mqtt.conflag.cleansess 231646 non-null float64 8 mqtt.conflag.passwd 231646 non-null float64 9 mqtt.conflag.qos 231646 non-null float64 10 mqtt.conflag.reserved 231646 non-null float64 11 mqtt.conflag.retain 231646 non-null float64 12 mqtt.conflag.uname 231646 non-null float64 13 mqtt.conflag.willflag 231646 non-null float64 14 mqtt.conflags 231646 non-null object 15 mqtt.dupflag 231646 non-null float64 16 mqtt.hdrflags 231646 non-null object 17 mqtt.kalive 231646 non-null float64 18 mqtt.len 231646 non-null float64 19 mqtt.msg 231646 non-null object 20 mqtt.msgid 231646 non-null float64 21 mqtt.msgtype 231646 non-null float64 22 mqtt.proto_len 231646 non-null float64 23 mqtt.protoname 231646 non-null object 24 mqtt.qos 231646 non-null float64 25 mqtt.retain 231646 non-null float64 26 mqtt.sub.qos 231646 non-null float64 27 mqtt.suback.qos 231646 non-null float64 28 mqtt.ver 231646 non-null float64 29 mqtt.willmsg 231646 non-null float64 30 mqtt.willmsg_len 231646 non-null float64 31 mqtt.willtopic 231646 non-null float64 32 mqtt.willtopic_len 231646 non-null float64 33 target 231646 non-null object dtypes: float64(26), int64(1), object(7) memory usage: 60.1+ MB <jupyter_text>Examples: { "tcp.flags": "0x00000018", "tcp.time_delta": 0.998867, "tcp.len": 10, "mqtt.conack.flags": 0, "mqtt.conack.flags.reserved": 0, "mqtt.conack.flags.sp": 0, "mqtt.conack.val": 0, "mqtt.conflag.cleansess": 0, "mqtt.conflag.passwd": 0, "mqtt.conflag.qos": 0, "mqtt.conflag.reserved": 0, "mqtt.conflag.retain": 0, "mqtt.conflag.uname": 0, "mqtt.conflag.willflag": 0, "mqtt.conflags": 0, "mqtt.dupflag": 0, "mqtt.hdrflags": "0x00000030", "mqtt.kalive": 0, "mqtt.len": 8, "mqtt.msg": 32.0, "...": "and 14 more columns" } { "tcp.flags": "0x00000010", "tcp.time_delta": 6.7e-05, "tcp.len": 1460, "mqtt.conack.flags": 0, "mqtt.conack.flags.reserved": 0, "mqtt.conack.flags.sp": 0, "mqtt.conack.val": 0, 
"mqtt.conflag.cleansess": 0, "mqtt.conflag.passwd": 0, "mqtt.conflag.qos": 0, "mqtt.conflag.reserved": 0, "mqtt.conflag.retain": 0, "mqtt.conflag.uname": 0, "mqtt.conflag.willflag": 0, "mqtt.conflags": 0, "mqtt.dupflag": 0, "mqtt.hdrflags": "0x00000032", "mqtt.kalive": 0, "mqtt.len": 169, "mqtt.msg": 6.361653943666145e+199, "...": "and 14 more columns" } { "tcp.flags": "0x00000010", "tcp.time_delta": 5.8e-05, "tcp.len": 1460, "mqtt.conack.flags": 0, "mqtt.conack.flags.reserved": 0, "mqtt.conack.flags.sp": 0, "mqtt.conack.val": 0, "mqtt.conflag.cleansess": 0, "mqtt.conflag.passwd": 0, "mqtt.conflag.qos": 0, "mqtt.conflag.reserved": 0, "mqtt.conflag.retain": 0, "mqtt.conflag.uname": 0, "mqtt.conflag.willflag": 0, "mqtt.conflags": 0, "mqtt.dupflag": 0, "mqtt.hdrflags": "0x00000032", "mqtt.kalive": 0, "mqtt.len": 163, "mqtt.msg": 4.232646141394334e+187, "...": "and 14 more columns" } { "tcp.flags": "0x00000018", "tcp.time_delta": 0.000227, "tcp.len": 10, "mqtt.conack.flags": 0, "mqtt.conack.flags.reserved": 0, "mqtt.conack.flags.sp": 0, "mqtt.conack.val": 0, "mqtt.conflag.cleansess": 0, "mqtt.conflag.passwd": 0, "mqtt.conflag.qos": 0, "mqtt.conflag.reserved": 0, "mqtt.conflag.retain": 0, "mqtt.conflag.uname": 0, "mqtt.conflag.willflag": 0, "mqtt.conflags": 0, "mqtt.dupflag": 0, "mqtt.hdrflags": "0x00000030", "mqtt.kalive": 0, "mqtt.len": 8, "mqtt.msg": 32.0, "...": "and 14 more columns" } <jupyter_script>from sklearn.model_selection import GridSearchCV import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn import metrics from keras.wrappers.scikit_learn import KerasClassifier from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Activation from tensorflow.keras.callbacks import EarlyStopping from keras.utils.vis_utils import plot_model from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.neural_network import MLPClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.svm import SVC from sklearn import tree from sklearn.metrics import confusion_matrix from matplotlib import pyplot as pyplot from sklearn.metrics import roc_auc_score, roc_curve from sklearn.metrics import plot_confusion_matrix from warnings import simplefilter import matplotlib.pyplot as plt import seaborn as sns import time start = time.time() dftrain = pd.read_csv("../input/mqttset/Data/FINAL_CSV/train70_reduced.csv") dftest = pd.read_csv("../input/datarev4/test30_reduced.csv") simplefilter(action="ignore", category=FutureWarning) seed = 7 # train # print(dftrain.loc[dftrain['target'] == 'legitimate']) print(dftrain["target"]) print(dftrain["target"].value_counts()) class_names = dftrain.target.unique() dftrain = dftrain.astype("category") cat_columns = dftrain.select_dtypes(["category"]).columns dftrain[cat_columns] = dftrain[cat_columns].apply(lambda x: x.cat.codes) # print(dftrain.loc[125, 'target']) x_columns = dftrain.columns.drop("target") x_train = dftrain[x_columns].values y_train = dftrain["target"] # test class_names = dftest.target.unique() dftest = dftest.astype("category") cat_columns = dftest.select_dtypes(["category"]).columns dftest[cat_columns] = dftest[cat_columns].apply(lambda x: x.cat.codes) x_columns = dftest.columns.drop("target") x_test = dftest[x_columns].values y_test = dftest["target"] 
print("Ready to generate train and test datasets") # Visualization def bar_graph(feature): dftest[feature].value_counts().plot(kind="bar") plt.figure(figsize=[15, 10]) # Data to be plotted totalDeath = [140000, 140000, 140000, 700000, 140000, 5000] totalRecovery = [1000, 1000, 1000, 1000, 1000, 1000] activeCases = [1139958, 347973, 239999, 129360, 34730, 34730] # Using numpy to group 3 different data with bars X = np.arange(len(totalDeath)) # Passing the parameters to the bar function, this is the main function which creates the bar plot # Using X now to align the bars side by side plt.bar(X, totalDeath, color="blue", width=0.25) # Creating the legend of the bars in the plot # Overiding the x axis with the country names plt.xticks( [i + 0.0 for i in range(6)], ["Dos", "Bruteforce", "SlowITE", "Legitimate", "Malformed", "MITM"], fontsize=18, fontweight="bold", ) # Giving the tilte for the plot plt.title( "Saldırı ve Veri Sayılarının Karşılaştırılması", fontsize=18, fontweight="bold" ) # Namimg the x and y axis plt.ylabel("Adet") # Saving the plot as a 'png' plt.savefig("4BarPlot.png") # Displaying the bar plot plt.show() countries = ["Dos", "Bruteforce", "SlowITE", "Legitimate", "Malformed"] # Integer value interms of death counts totalDeaths = [140000, 140000, 140000, 700000, 140000] # Passing the parameters to the bar function, this is the main function which creates the bar plot plt.bar(countries, totalDeaths) # Displaying the bar plot plt.show() plt.title("Saldırı ve Veri Sayılarının Karşılaştırılması") # Namimg the x and y axis plt.ylabel("Sayı") x = np.arange(5) plt.bar(x, height=[1400000, 1400000, 1400000, 7000000, 1400000]) plt.xticks(x, ["Dos", "Bruteforce", "SlowITE", "Legitimate", "Malformed"]) # Neural network print("Starting Random forest") model = Sequential() model.add( Dense( 50, input_dim=x_train.shape[1], kernel_initializer="normal", activation="relu" ) ) model.add( Dense( 30, input_dim=x_train.shape[1], kernel_initializer="normal", activation="relu" ) ) model.add(Dense(20, kernel_initializer="normal")) model.add(Dense(6, activation="softmax")) model.compile( loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"] ) monitor = EarlyStopping( monitor="val_loss", min_delta=1e-3, patience=5, verbose=1, mode="auto" ) history = model.fit( x_train, y_train, validation_data=(x_test, y_test), callbacks=[monitor], verbose=2, epochs=200, batch_size=1000, ) end = time.time() diff = end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_nn = model.predict(x_test) y_pred_nn = np.argmax(y_pred_nn, axis=1) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) print(model.summary()) # RandomForest print("Starting Random forest") classifier = RandomForestClassifier(verbose=2, random_state=seed) classifier.fit(x_train, y_train) end = time.time() diff = end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_random = classifier.predict(x_test) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) # Create Naive Bayes Classifier print("Starting Naive Bayes") gnb = GaussianNB() gnb.fit(x_train, y_train) end = time.time() diff = end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_nb = gnb.predict(x_test) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) # Decision tree print("Starting Decision tree") clf = DecisionTreeClassifier() clf = clf.fit(x_train, y_train) end = time.time() diff = 
end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_dt = clf.predict(x_test) y_pred_dt_roc = clf.predict_proba(x_test) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) # Multi layer perceptron print("Starting Multi layer perceptron") model = MLPClassifier( max_iter=130, batch_size=1000, alpha=1e-4, activation="relu", solver="adam", verbose=10, tol=1e-4, random_state=seed, ) model.fit(x_train, y_train) end = time.time() diff = end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_mlp = model.predict(x_test) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) # Gradient boost print("Starting Gradient boost") model = GradientBoostingClassifier(n_estimators=20, random_state=seed, verbose=2) model.fit(x_train, y_train) end = time.time() diff = end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_gradient = model.predict(x_test) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) print( "Decision Tree, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_dt)) + " F1 score:" + str(metrics.f1_score(y_test, y_pred_dt, average="weighted")) ) matrixdt = confusion_matrix(y_test, y_pred_dt) print(matrixdt) print( "Naive Bayes, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_nb)) + " F1 score:" + str(metrics.f1_score(y_test, y_pred_nb, average="weighted")) ) matrixnv = confusion_matrix(y_test, y_pred_nb) print(matrixnv) print( "Neural network, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_nn)) + " F1 score:" + str(metrics.f1_score(y_test, y_pred_nn, average="weighted")) ) matrixnn = confusion_matrix(y_test, y_pred_nn) print(matrixnn) print( "MultiLayerPerceptron, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_mlp)) + " F1 score:" + str(metrics.f1_score(y_test, y_pred_mlp, average="weighted")) ) matrixml = confusion_matrix(y_test, y_pred_mlp) print(matrixml) print( "Random Forest, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_random)) + " F1 score:" + str(metrics.f1_score(y_test, y_pred_random, average="weighted")) ) matrixrf = confusion_matrix(y_test, y_pred_random) print(matrixrf) print( "GradienBoost, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_gradient)) + " F1 score:" + str(metrics.f1_score(y_test, y_pred_gradient, average="weighted")) ) matrixgb = confusion_matrix(y_test, y_pred_gradient) print(matrixgb) import pandas as pd import tensorflow as tf from tensorflow import keras from sklearn.model_selection import train_test_split import numpy as np import matplotlib.pyplot as plt loss_train = history.history["accuracy"] loss_val = history.history["val_accuracy"] print(loss_train) print(loss_val) epochs = range(6) plt.plot(epochs, loss_train, "g", label="Training accuracy") plt.plot(epochs, loss_val, "b", label="validation accuracy") plt.title("Training and Validation accuracy") plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.legend() plt.show() dftest.columns # DATA CORRELATION dftest.columns dftest = dftest.dropna("columns") # drop columns with NaN dftest = dftest[ [col for col in dftest if dftest[col].nunique() > 1] ] # keep columns where there are more than 1 unique values corr = dftest.corr() plt.figure(figsize=(15, 12)) sns.heatmap(corr) plt.show() import matplotlib.pyplot as plt plt.rcParams.update({"font.size": 22}) plt.figure(figsize=[15, 10]) # Data to be plotted totalDeath = [ 0.9025883762732175, 0.9467405632115038, 0.8110844817255842, 
0.8510844817255842, ] totalRecovery = [1000, 1000, 1000, 1000, 1000, 1000] activeCases = [1139958, 347973, 239999, 129360, 34730, 34730] # Using numpy to group 3 different data with bars X = np.arange(len(totalDeath)) # Passing the parameters to the bar function, this is the main function which creates the bar plot # Using X now to align the bars side by side plt.bar(X, totalDeath, color="blue", width=0.25) # Creating the legend of the bars in the plot # Overiding the x axis with the country names plt.xticks( [i + 0.0 for i in range(4)], ["NB", "DT", "NN", "RF"], fontsize=18, fontweight="bold", ) # Giving the tilte for the plot plt.title("NB,DT, NN ve RF Karşılaştırma", fontsize=18, fontweight="bold") # Namimg the x and y axis plt.ylabel("Doğruluk", fontsize=18, fontweight="bold") # Saving the plot as a 'png' plt.savefig("4BarPlot.png") # Displaying the bar plot plt.show() # Accuracy fig = plt.figure() ax = fig.add_axes([0, 0.2, 0.6, 0.8, 1]) langs = ["NB", "DT", "NN", "RF"] values = [ 0.9025883762732175, 0.9467405632115038, 0.8110844817255842, 0.8510844817255842, ] f = plt.figure(figsize=(15, 3), num=10) plt.subplot(131) plt.ylim(0, 0.2) ax.bar(langs, values, width=0.3) ax.set_ylabel("Doğruluk") ax.set_title("NB,DT ve NN Karşılaştırma") plt.show() # Accuracy fig = plt.figure() ax = fig.add_axes([0, 0.2, 0.6, 0.8, 1]) langs = ["NB", "DT", "NN", "RF"] values = [ 0.9025883762732175, 0.9467405632115038, 0.8110844817255842, 0.8510844817255842, ] f = plt.figure(figsize=(15, 3), num=10) plt.subplot(131) plt.ylim(0, 0.2) ax.bar(langs, values, width=0.3) ax.set_ylabel("Doğruluk") ax.set_title("NB,DT ve NN Karşılaştırma") plt.show() # Accuracy fig = plt.figure() ax = fig.add_axes([0, 0.2, 0.6, 0.8, 1]) langs = ["NB", "DT", "NN", "RF"] values = [ 0.9025883762732175, 0.9467405632115038, 0.8110844817255842, 0.8510844817255842, ] f = plt.figure(figsize=(15, 3), num=10) plt.subplot(131) plt.ylim(0, 0.2) ax.bar(langs, values, width=0.3) ax.set_ylabel("Doğruluk") ax.set_title("NB,DT ve NN Karşılaştırma") plt.show() # F1 Score fig = plt.figure() ax = fig.add_axes([0, 0, 0.8, 1]) langs = ["NB", "DT", "NN"] values = [0.8250899949069051, 0.9040449842708569, 0.8587280320285047] f = plt.figure(figsize=(15, 3), num=10) plt.subplot(131) plt.ylim(0, 0.3) ax.bar(langs, values, width=0.3) ax.set_ylabel("F1 Skor") ax.set_title("NB,DT ve NN Karşılaştırma") import matplotlib.pyplot as plt plt.rcParams.update({"font.size": 22}) plt.figure(figsize=[15, 10]) # Data to be plotted totalDeath = [ 0.8250899949069051, 0.9040449842708569, 0.8587280320285047, 0.9040449842708569, ] totalRecovery = [1000, 1000, 1000, 1000, 1000, 1000] activeCases = [1139958, 347973, 239999, 129360, 34730, 34730] # Using numpy to group 3 different data with bars X = np.arange(len(totalDeath)) # Passing the parameters to the bar function, this is the main function which creates the bar plot # Using X now to align the bars side by side plt.bar(X, totalDeath, color="blue", width=0.25) # Creating the legend of the bars in the plot # Overiding the x axis with the country names plt.xticks( [i + 0.0 for i in range(4)], ["NB", "DT", "NN", "RF"], fontsize=18, fontweight="bold", ) # Giving the tilte for the plot plt.title("NB,DT, NN ve RF Karşılaştırma", fontsize=18, fontweight="bold") # Namimg the x and y axis plt.ylabel("F1 Skor", fontsize=18, fontweight="bold") # Saving the plot as a 'png' plt.savefig("4BarPlot.png", fontsize=18, fontweight="bold") # Displaying the bar plot plt.show()
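# A few of the plots above rely on hard-coded values and fragile calls:
# `epochs = range(6)` assumes the network trained for exactly 6 epochs even though
# EarlyStopping decides the actual length, and fig.add_axes() is given a 5-element
# list where matplotlib expects a 4-element [left, bottom, width, height] rect.
# A sketch of a data-driven alternative (assumption: the `history` object and the
# metrics variables computed in the cells above are available):
import matplotlib.pyplot as plt

acc_train = history.history["accuracy"]
acc_val = history.history["val_accuracy"]
epoch_axis = range(1, len(acc_train) + 1)  # length follows early stopping
plt.figure(figsize=(8, 5))
plt.plot(epoch_axis, acc_train, "g", label="Training accuracy")
plt.plot(epoch_axis, acc_val, "b", label="Validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
plt.show()

# Build the model-comparison bars from the computed scores instead of hard-coded numbers.
model_scores = {
    "NB": metrics.accuracy_score(y_test, y_pred_nb),
    "DT": metrics.accuracy_score(y_test, y_pred_dt),
    "NN": metrics.accuracy_score(y_test, y_pred_nn),
    "RF": metrics.accuracy_score(y_test, y_pred_random),
}
fig, ax = plt.subplots(figsize=(8, 5))
ax.bar(list(model_scores.keys()), list(model_scores.values()), width=0.3)
ax.set_ylabel("Accuracy")
ax.set_title("Model comparison (computed, not hard-coded)")
plt.show()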
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/913/69913565.ipynb
mqttset
cnrieiit
[{"Id": 69913565, "ScriptId": 16728773, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6467833, "CreationDate": "08/03/2021 20:19:04", "VersionNumber": 6.0, "Title": "notebook46ef6d1091", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 379.0, "LinesInsertedFromPrevious": 185.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 194.0, "LinesInsertedFromFork": 258.0, "LinesDeletedFromFork": 3.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 121.0, "TotalVotes": 1}]
[{"Id": 93333854, "KernelVersionId": 69913565, "SourceDatasetVersionId": 2424115}]
[{"Id": 2424115, "DatasetId": 979765, "DatasourceVersionId": 2466316, "CreatorUserId": 6182134, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "07/14/2021 08:34:12", "VersionNumber": 5.0, "Title": "MQTTset", "Slug": "mqttset", "Subtitle": "A new dataset for machine learning techniques on MQTT", "Description": "# MQTTset, a new dataset for MQTT \n\nThe proposed work aims to create a dataset linked to the IoT context, in particular on the MQTT communication protocol, in order to give to the research and industrial community an initial dataset to use in their application. The dataset is composed by IoT sensors based on MQTT where each aspect of a real network is defined. In particular, the MQTT broker is instantiated by using Eclipse Mosquitto and the network is composed by 8 sensors. The scenario is related to a smart home environment where sensors retrieve information about temperature, light, humidity, CO-Gas, motion, smoke, door and fan with different time interval since the behaviour of each sensor is different with the others.\n\n## Getting Started\n\nIn order to user the dataset, simply download the github repository and start to work with the csv or PCAP file. More information are available at the following page: [Vaccari, I.; Chiola, G.; Aiello, M.; Mongelli, M.; Cambiaso, E. MQTTset, a New Dataset for Machine Learning Techniques on MQTT. Sensors 2020, 20, 6578](https://www.mdpi.com/1424-8220/20/22/6578/htm)\n\n\nPlease if you use this dataset in a research work, please cite this article.\n\n### MQTT network structure\nAs mentioned, the dataset isc composed by 8 MQTT sensors with different features. In table, the MQTT sensors are reported. Each sensor is associated with a data profile and a topic linked to the MQTT broker. The data profile consists of the type of data that the sensors communicate while the topic is defined by the sensor when sending the data to the broker. Finally, the sensors were conceptually divided into two rooms as if they were distributed in a smart house and the MQTT broker has 10.16.100.73 as IP address with 1883 as clear text communication port. In the table, the time could be periodic o random. This concept is important since a temperature sensor has a periodic behavior over time, i.e. cyclically sending information retrieved from the environment periodically (defined as P). Instead, a motion sensor has a more random behavior since it sends information only when a user passes in front of the sensor (defined as R)). By analyzing also this aspect, the dataset is even more valid as a real behavior of a home automation is simulated and implemented. 
\n\nSensor | IP address | Room | Time (P:periodic, R:random) | Topic | Data Profile \n--- | --- | --- | --- |--- |--- \nTemperature | 192.168.0.151 | 1 | P, 60 s | Temperature| Temperature \nLight intensity | 192.168.0.150 | 1 | P, 1800 s | Light intensity| Light intensity \nHumidity | 192.168.0.152 | 1 | P, 60 s | Humidity| Humidity \nMotion sensor | 192.168.0.154 | 1 | R, 1 h | Motion sensor | Motion sensor \nCO-Gas | 192.168.0.155 | 1 | R, 1 h s | CO-Gas| CO-Gas \nSmoke | 192.168.0.180 | 2 | R, 1 h | Smoke| Smoke \nFan speed controller | 192.168.0.173 | 2 | P, 120 s | Fan speed controller| Fan speed controller \nDoor lock | 192.168.0.176 | 2 |R, 1 h | Door lock| Door lock \nFan sensor | 192.168.0.178 | 2 | P, 60 s | Fan sensor| Fan sensor \nMotion sensor | 192.168.0.174 | 2 | R, 1 h | Motion sensor | Motion sensor \n\n\n### Github repository organization\n\nThe repository is composed by 3 folder:\n\n* PCAP raw data\n * Legitimate\n * SlowITe\n * Bruteforce\n * Malformed data\n * Flooding\n * DoS attack\n* CSV file\n * Legitimate\n * SlowITe\n * Bruteforce\n * Malformed data\n * Flooding\n * DoS attack\n* Final dataset\n * train70.csv, test30.csv\n * train70_reduced.csv, test30_reduced.csv\n * train70_augmented.csv, test30_augmented.csv\n\nIn the PCAP folder, there are the raw network data recovered directly from the sensors of the MQTT network and also the traffic related to the attacks. In the CSV folder instead, there are the data and features extracted from the PCAP file using the tshark tool. Finally, the FINAL_CSV folder contains the CSV files combined with each other and subsequently used for machine learning algorithms. In particular, CSV files are present in 3 different formats:\n\n\n* train70.csv, test30.csv: in these files, the legitimate traffic was randomly combined with the different malicious traffic.\n* train70_reduced.csv, test30_reduced.csv: the reduced form combines malicious traffic with legitimate traffic in the 50:50 form, so there will be less legitimate traffic than actually. The legitimate traffic will be equal to the sum of the malicious traffic\n* train70_augmented.csv, test30_augmented.csv: in the augmented form, however, the malicious traffic has been increased so that the sum of the traffic related to the attacks is equal to the legitimate traffic.\n\n## Installation\n\nIn order to use the dataset combined with the proposed code, run the following command to install python3 libraries:\n\n```\npip3 install -r requirements.txt\n```\n\n## Built With\n\n* [IoT-Flock](https://github.com/ThingzDefense/IoT-Flock) - Framework to generate IoT networks\n* [MQTTSA](https://github.com/stfbk/mqttsa) - A security assessment tool for MQTT networks\n\n## Authors\n\n* **Ivan Vaccari** - *Concept, implementation, elaboration, paper writer* - [Profile](https://www.ieiit.cnr.it/people/Vaccari-Ivan)\n* **Giovanni Chiola** - *Dataset approach and definitiom* - [Profile](https://www.dibris.unige.it/chiola-giovanni)\n* **Maurizio Aiello** - *Supervisor, paper reviewer* - [Profile](https://www.ieiit.cnr.it/people/Aiello-Maurizio)\n* **Maurizio Mongelli** - *Machine learning support and contribution* - [Profile](https://www.ieiit.cnr.it/people/Mongelli-Maurizio)\n* **Enrico Cambiaso** - *Supervisor, elaboration, paper collaboration* - [Profile](https://www.ieiit.cnr.it/people/Cambiaso-Enrico)", "VersionNotes": "Data Update 2021-07-14", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 979765, "CreatorUserId": 6182134, "OwnerUserId": 6182134.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2424115.0, "CurrentDatasourceVersionId": 2466316.0, "ForumId": 996224, "Type": 2, "CreationDate": "11/17/2020 13:48:34", "LastActivityDate": "11/17/2020", "TotalViews": 21971, "TotalDownloads": 2820, "TotalVotes": 28, "TotalKernels": 3}]
[{"Id": 6182134, "UserName": "cnrieiit", "DisplayName": "Network security group CNR-IEIIT", "RegisterDate": "11/17/2020", "PerformanceTier": 0}]
from sklearn.model_selection import GridSearchCV import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn import metrics from keras.wrappers.scikit_learn import KerasClassifier from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Activation from tensorflow.keras.callbacks import EarlyStopping from keras.utils.vis_utils import plot_model from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.neural_network import MLPClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.svm import SVC from sklearn import tree from sklearn.metrics import confusion_matrix from matplotlib import pyplot as pyplot from sklearn.metrics import roc_auc_score, roc_curve from sklearn.metrics import plot_confusion_matrix from warnings import simplefilter import matplotlib.pyplot as plt import seaborn as sns import time start = time.time() dftrain = pd.read_csv("../input/mqttset/Data/FINAL_CSV/train70_reduced.csv") dftest = pd.read_csv("../input/datarev4/test30_reduced.csv") simplefilter(action="ignore", category=FutureWarning) seed = 7 # train # print(dftrain.loc[dftrain['target'] == 'legitimate']) print(dftrain["target"]) print(dftrain["target"].value_counts()) class_names = dftrain.target.unique() dftrain = dftrain.astype("category") cat_columns = dftrain.select_dtypes(["category"]).columns dftrain[cat_columns] = dftrain[cat_columns].apply(lambda x: x.cat.codes) # print(dftrain.loc[125, 'target']) x_columns = dftrain.columns.drop("target") x_train = dftrain[x_columns].values y_train = dftrain["target"] # test class_names = dftest.target.unique() dftest = dftest.astype("category") cat_columns = dftest.select_dtypes(["category"]).columns dftest[cat_columns] = dftest[cat_columns].apply(lambda x: x.cat.codes) x_columns = dftest.columns.drop("target") x_test = dftest[x_columns].values y_test = dftest["target"] print("Ready to generate train and test datasets") # Visualization def bar_graph(feature): dftest[feature].value_counts().plot(kind="bar") plt.figure(figsize=[15, 10]) # Data to be plotted totalDeath = [140000, 140000, 140000, 700000, 140000, 5000] totalRecovery = [1000, 1000, 1000, 1000, 1000, 1000] activeCases = [1139958, 347973, 239999, 129360, 34730, 34730] # Using numpy to group 3 different data with bars X = np.arange(len(totalDeath)) # Passing the parameters to the bar function, this is the main function which creates the bar plot # Using X now to align the bars side by side plt.bar(X, totalDeath, color="blue", width=0.25) # Creating the legend of the bars in the plot # Overiding the x axis with the country names plt.xticks( [i + 0.0 for i in range(6)], ["Dos", "Bruteforce", "SlowITE", "Legitimate", "Malformed", "MITM"], fontsize=18, fontweight="bold", ) # Giving the tilte for the plot plt.title( "Saldırı ve Veri Sayılarının Karşılaştırılması", fontsize=18, fontweight="bold" ) # Namimg the x and y axis plt.ylabel("Adet") # Saving the plot as a 'png' plt.savefig("4BarPlot.png") # Displaying the bar plot plt.show() countries = ["Dos", "Bruteforce", "SlowITE", "Legitimate", "Malformed"] # Integer value interms of death counts totalDeaths = [140000, 140000, 140000, 700000, 140000] # Passing the parameters to the bar function, this is the main function which creates the bar plot plt.bar(countries, 
totalDeaths) # Displaying the bar plot plt.show() plt.title("Saldırı ve Veri Sayılarının Karşılaştırılması") # Namimg the x and y axis plt.ylabel("Sayı") x = np.arange(5) plt.bar(x, height=[1400000, 1400000, 1400000, 7000000, 1400000]) plt.xticks(x, ["Dos", "Bruteforce", "SlowITE", "Legitimate", "Malformed"]) # Neural network print("Starting Random forest") model = Sequential() model.add( Dense( 50, input_dim=x_train.shape[1], kernel_initializer="normal", activation="relu" ) ) model.add( Dense( 30, input_dim=x_train.shape[1], kernel_initializer="normal", activation="relu" ) ) model.add(Dense(20, kernel_initializer="normal")) model.add(Dense(6, activation="softmax")) model.compile( loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"] ) monitor = EarlyStopping( monitor="val_loss", min_delta=1e-3, patience=5, verbose=1, mode="auto" ) history = model.fit( x_train, y_train, validation_data=(x_test, y_test), callbacks=[monitor], verbose=2, epochs=200, batch_size=1000, ) end = time.time() diff = end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_nn = model.predict(x_test) y_pred_nn = np.argmax(y_pred_nn, axis=1) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) print(model.summary()) # RandomForest print("Starting Random forest") classifier = RandomForestClassifier(verbose=2, random_state=seed) classifier.fit(x_train, y_train) end = time.time() diff = end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_random = classifier.predict(x_test) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) # Create Naive Bayes Classifier print("Starting Naive Bayes") gnb = GaussianNB() gnb.fit(x_train, y_train) end = time.time() diff = end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_nb = gnb.predict(x_test) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) # Decision tree print("Starting Decision tree") clf = DecisionTreeClassifier() clf = clf.fit(x_train, y_train) end = time.time() diff = end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_dt = clf.predict(x_test) y_pred_dt_roc = clf.predict_proba(x_test) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) # Multi layer perceptron print("Starting Multi layer perceptron") model = MLPClassifier( max_iter=130, batch_size=1000, alpha=1e-4, activation="relu", solver="adam", verbose=10, tol=1e-4, random_state=seed, ) model.fit(x_train, y_train) end = time.time() diff = end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_mlp = model.predict(x_test) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) # Gradient boost print("Starting Gradient boost") model = GradientBoostingClassifier(n_estimators=20, random_state=seed, verbose=2) model.fit(x_train, y_train) end = time.time() diff = end - start print("Training time: " + str(diff)) starttest = time.time() y_pred_gradient = model.predict(x_test) endtest = time.time() difftest = endtest - starttest print("Test time: " + str(difftest)) print( "Decision Tree, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_dt)) + " F1 score:" + str(metrics.f1_score(y_test, y_pred_dt, average="weighted")) ) matrixdt = confusion_matrix(y_test, y_pred_dt) print(matrixdt) print( "Naive Bayes, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_nb)) + " F1 score:" + 
str(metrics.f1_score(y_test, y_pred_nb, average="weighted")) ) matrixnv = confusion_matrix(y_test, y_pred_nb) print(matrixnv) print( "Neural network, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_nn)) + " F1 score:" + str(metrics.f1_score(y_test, y_pred_nn, average="weighted")) ) matrixnn = confusion_matrix(y_test, y_pred_nn) print(matrixnn) print( "MultiLayerPerceptron, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_mlp)) + " F1 score:" + str(metrics.f1_score(y_test, y_pred_mlp, average="weighted")) ) matrixml = confusion_matrix(y_test, y_pred_mlp) print(matrixml) print( "Random Forest, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_random)) + " F1 score:" + str(metrics.f1_score(y_test, y_pred_random, average="weighted")) ) matrixrf = confusion_matrix(y_test, y_pred_random) print(matrixrf) print( "GradienBoost, accuracy: " + str(metrics.accuracy_score(y_test, y_pred_gradient)) + " F1 score:" + str(metrics.f1_score(y_test, y_pred_gradient, average="weighted")) ) matrixgb = confusion_matrix(y_test, y_pred_gradient) print(matrixgb) import pandas as pd import tensorflow as tf from tensorflow import keras from sklearn.model_selection import train_test_split import numpy as np import matplotlib.pyplot as plt loss_train = history.history["accuracy"] loss_val = history.history["val_accuracy"] print(loss_train) print(loss_val) epochs = range(6) plt.plot(epochs, loss_train, "g", label="Training accuracy") plt.plot(epochs, loss_val, "b", label="validation accuracy") plt.title("Training and Validation accuracy") plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.legend() plt.show() dftest.columns # DATA CORRELATION dftest.columns dftest = dftest.dropna("columns") # drop columns with NaN dftest = dftest[ [col for col in dftest if dftest[col].nunique() > 1] ] # keep columns where there are more than 1 unique values corr = dftest.corr() plt.figure(figsize=(15, 12)) sns.heatmap(corr) plt.show() import matplotlib.pyplot as plt plt.rcParams.update({"font.size": 22}) plt.figure(figsize=[15, 10]) # Data to be plotted totalDeath = [ 0.9025883762732175, 0.9467405632115038, 0.8110844817255842, 0.8510844817255842, ] totalRecovery = [1000, 1000, 1000, 1000, 1000, 1000] activeCases = [1139958, 347973, 239999, 129360, 34730, 34730] # Using numpy to group 3 different data with bars X = np.arange(len(totalDeath)) # Passing the parameters to the bar function, this is the main function which creates the bar plot # Using X now to align the bars side by side plt.bar(X, totalDeath, color="blue", width=0.25) # Creating the legend of the bars in the plot # Overiding the x axis with the country names plt.xticks( [i + 0.0 for i in range(4)], ["NB", "DT", "NN", "RF"], fontsize=18, fontweight="bold", ) # Giving the tilte for the plot plt.title("NB,DT, NN ve RF Karşılaştırma", fontsize=18, fontweight="bold") # Namimg the x and y axis plt.ylabel("Doğruluk", fontsize=18, fontweight="bold") # Saving the plot as a 'png' plt.savefig("4BarPlot.png") # Displaying the bar plot plt.show() # Accuracy fig = plt.figure() ax = fig.add_axes([0, 0.2, 0.6, 0.8, 1]) langs = ["NB", "DT", "NN", "RF"] values = [ 0.9025883762732175, 0.9467405632115038, 0.8110844817255842, 0.8510844817255842, ] f = plt.figure(figsize=(15, 3), num=10) plt.subplot(131) plt.ylim(0, 0.2) ax.bar(langs, values, width=0.3) ax.set_ylabel("Doğruluk") ax.set_title("NB,DT ve NN Karşılaştırma") plt.show() # Accuracy fig = plt.figure() ax = fig.add_axes([0, 0.2, 0.6, 0.8, 1]) langs = ["NB", "DT", "NN", "RF"] values = [ 0.9025883762732175, 
0.9467405632115038, 0.8110844817255842, 0.8510844817255842, ] f = plt.figure(figsize=(15, 3), num=10) plt.subplot(131) plt.ylim(0, 0.2) ax.bar(langs, values, width=0.3) ax.set_ylabel("Doğruluk") ax.set_title("NB,DT ve NN Karşılaştırma") plt.show() # Accuracy fig = plt.figure() ax = fig.add_axes([0, 0.2, 0.6, 0.8, 1]) langs = ["NB", "DT", "NN", "RF"] values = [ 0.9025883762732175, 0.9467405632115038, 0.8110844817255842, 0.8510844817255842, ] f = plt.figure(figsize=(15, 3), num=10) plt.subplot(131) plt.ylim(0, 0.2) ax.bar(langs, values, width=0.3) ax.set_ylabel("Doğruluk") ax.set_title("NB,DT ve NN Karşılaştırma") plt.show() # F1 Score fig = plt.figure() ax = fig.add_axes([0, 0, 0.8, 1]) langs = ["NB", "DT", "NN"] values = [0.8250899949069051, 0.9040449842708569, 0.8587280320285047] f = plt.figure(figsize=(15, 3), num=10) plt.subplot(131) plt.ylim(0, 0.3) ax.bar(langs, values, width=0.3) ax.set_ylabel("F1 Skor") ax.set_title("NB,DT ve NN Karşılaştırma") import matplotlib.pyplot as plt plt.rcParams.update({"font.size": 22}) plt.figure(figsize=[15, 10]) # Data to be plotted totalDeath = [ 0.8250899949069051, 0.9040449842708569, 0.8587280320285047, 0.9040449842708569, ] totalRecovery = [1000, 1000, 1000, 1000, 1000, 1000] activeCases = [1139958, 347973, 239999, 129360, 34730, 34730] # Using numpy to group 3 different data with bars X = np.arange(len(totalDeath)) # Passing the parameters to the bar function, this is the main function which creates the bar plot # Using X now to align the bars side by side plt.bar(X, totalDeath, color="blue", width=0.25) # Creating the legend of the bars in the plot # Overiding the x axis with the country names plt.xticks( [i + 0.0 for i in range(4)], ["NB", "DT", "NN", "RF"], fontsize=18, fontweight="bold", ) # Giving the tilte for the plot plt.title("NB,DT, NN ve RF Karşılaştırma", fontsize=18, fontweight="bold") # Namimg the x and y axis plt.ylabel("F1 Skor", fontsize=18, fontweight="bold") # Saving the plot as a 'png' plt.savefig("4BarPlot.png", fontsize=18, fontweight="bold") # Displaying the bar plot plt.show()
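# The confusion matrices above are only printed as raw arrays; a sketch of a
# row-normalized heatmap for one of them (assumption: `matrixrf`, `sns`, `np`
# and `plt` from the cells above are available; the class order follows the
# integer codes produced by cat.codes):
cm = matrixrf.astype(float)
cm_norm = cm / cm.sum(axis=1, keepdims=True)  # each row sums to 1
plt.figure(figsize=(8, 6))
sns.heatmap(cm_norm, annot=True, fmt=".2f", cmap="Blues")
plt.xlabel("Predicted class code")
plt.ylabel("True class code")
plt.title("Random Forest confusion matrix (row-normalized)")
plt.show()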
[{"mqttset/Data/FINAL_CSV/train70_reduced.csv": {"column_names": "[\"tcp.flags\", \"tcp.time_delta\", \"tcp.len\", \"mqtt.conack.flags\", \"mqtt.conack.flags.reserved\", \"mqtt.conack.flags.sp\", \"mqtt.conack.val\", \"mqtt.conflag.cleansess\", \"mqtt.conflag.passwd\", \"mqtt.conflag.qos\", \"mqtt.conflag.reserved\", \"mqtt.conflag.retain\", \"mqtt.conflag.uname\", \"mqtt.conflag.willflag\", \"mqtt.conflags\", \"mqtt.dupflag\", \"mqtt.hdrflags\", \"mqtt.kalive\", \"mqtt.len\", \"mqtt.msg\", \"mqtt.msgid\", \"mqtt.msgtype\", \"mqtt.proto_len\", \"mqtt.protoname\", \"mqtt.qos\", \"mqtt.retain\", \"mqtt.sub.qos\", \"mqtt.suback.qos\", \"mqtt.ver\", \"mqtt.willmsg\", \"mqtt.willmsg_len\", \"mqtt.willtopic\", \"mqtt.willtopic_len\", \"target\"]", "column_data_types": "{\"tcp.flags\": \"object\", \"tcp.time_delta\": \"float64\", \"tcp.len\": \"int64\", \"mqtt.conack.flags\": \"object\", \"mqtt.conack.flags.reserved\": \"float64\", \"mqtt.conack.flags.sp\": \"float64\", \"mqtt.conack.val\": \"float64\", \"mqtt.conflag.cleansess\": \"float64\", \"mqtt.conflag.passwd\": \"float64\", \"mqtt.conflag.qos\": \"float64\", \"mqtt.conflag.reserved\": \"float64\", \"mqtt.conflag.retain\": \"float64\", \"mqtt.conflag.uname\": \"float64\", \"mqtt.conflag.willflag\": \"float64\", \"mqtt.conflags\": \"object\", \"mqtt.dupflag\": \"float64\", \"mqtt.hdrflags\": \"object\", \"mqtt.kalive\": \"float64\", \"mqtt.len\": \"float64\", \"mqtt.msg\": \"object\", \"mqtt.msgid\": \"float64\", \"mqtt.msgtype\": \"float64\", \"mqtt.proto_len\": \"float64\", \"mqtt.protoname\": \"object\", \"mqtt.qos\": \"float64\", \"mqtt.retain\": \"float64\", \"mqtt.sub.qos\": \"float64\", \"mqtt.suback.qos\": \"float64\", \"mqtt.ver\": \"float64\", \"mqtt.willmsg\": \"float64\", \"mqtt.willmsg_len\": \"float64\", \"mqtt.willtopic\": \"float64\", \"mqtt.willtopic_len\": \"float64\", \"target\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 231646 entries, 0 to 231645\nData columns (total 34 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 tcp.flags 231646 non-null object \n 1 tcp.time_delta 231646 non-null float64\n 2 tcp.len 231646 non-null int64 \n 3 mqtt.conack.flags 231646 non-null object \n 4 mqtt.conack.flags.reserved 231646 non-null float64\n 5 mqtt.conack.flags.sp 231646 non-null float64\n 6 mqtt.conack.val 231646 non-null float64\n 7 mqtt.conflag.cleansess 231646 non-null float64\n 8 mqtt.conflag.passwd 231646 non-null float64\n 9 mqtt.conflag.qos 231646 non-null float64\n 10 mqtt.conflag.reserved 231646 non-null float64\n 11 mqtt.conflag.retain 231646 non-null float64\n 12 mqtt.conflag.uname 231646 non-null float64\n 13 mqtt.conflag.willflag 231646 non-null float64\n 14 mqtt.conflags 231646 non-null object \n 15 mqtt.dupflag 231646 non-null float64\n 16 mqtt.hdrflags 231646 non-null object \n 17 mqtt.kalive 231646 non-null float64\n 18 mqtt.len 231646 non-null float64\n 19 mqtt.msg 231646 non-null object \n 20 mqtt.msgid 231646 non-null float64\n 21 mqtt.msgtype 231646 non-null float64\n 22 mqtt.proto_len 231646 non-null float64\n 23 mqtt.protoname 231646 non-null object \n 24 mqtt.qos 231646 non-null float64\n 25 mqtt.retain 231646 non-null float64\n 26 mqtt.sub.qos 231646 non-null float64\n 27 mqtt.suback.qos 231646 non-null float64\n 28 mqtt.ver 231646 non-null float64\n 29 mqtt.willmsg 231646 non-null float64\n 30 mqtt.willmsg_len 231646 non-null float64\n 31 mqtt.willtopic 231646 non-null float64\n 32 mqtt.willtopic_len 231646 non-null float64\n 33 target 231646 
non-null object \ndtypes: float64(26), int64(1), object(7)\nmemory usage: 60.1+ MB\n", "summary": "{\"tcp.time_delta\": {\"count\": 231646.0, \"mean\": 0.2788670366982378, \"std\": 2.900768079075393, \"min\": -2e-06, \"25%\": 3e-06, \"50%\": 3e-05, \"75%\": 0.000197, \"max\": 60.000878}, \"tcp.len\": {\"count\": 231646.0, \"mean\": 149.96288301978018, \"std\": 947.5215046201207, \"min\": 0.0, \"25%\": 0.0, \"50%\": 10.0, \"75%\": 14.0, \"max\": 32768.0}, \"mqtt.conack.flags.reserved\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.0}, \"mqtt.conack.flags.sp\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.0}, \"mqtt.conack.val\": {\"count\": 231646.0, \"mean\": 0.02182209060376609, \"std\": 0.3295978130224612, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 5.0}, \"mqtt.conflag.cleansess\": {\"count\": 231646.0, \"mean\": 0.009570637956191775, \"std\": 0.09736057603493033, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"mqtt.conflag.passwd\": {\"count\": 231646.0, \"mean\": 0.004329882665791768, \"std\": 0.06565937399067799, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"mqtt.conflag.qos\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.0}, \"mqtt.conflag.reserved\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.0}, \"mqtt.conflag.retain\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.0}, \"mqtt.conflag.uname\": {\"count\": 231646.0, \"mean\": 0.004347150393272494, \"std\": 0.065789599190019, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"mqtt.conflag.willflag\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.0}, \"mqtt.dupflag\": {\"count\": 231646.0, \"mean\": 0.05241618676774043, \"std\": 0.2228653058461982, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"mqtt.kalive\": {\"count\": 231646.0, \"mean\": 175.53527796724313, \"std\": 3383.1709252659853, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 65535.0}, \"mqtt.len\": {\"count\": 231646.0, \"mean\": 31.435725201384873, \"std\": 61.311029572011364, \"min\": 0.0, \"25%\": 0.0, \"50%\": 2.0, \"75%\": 11.0, \"max\": 692.0}, \"mqtt.msgid\": {\"count\": 231646.0, \"mean\": 1162.9718147518197, \"std\": 2276.028665265134, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1025.0, \"max\": 9994.0}, \"mqtt.msgtype\": {\"count\": 231646.0, \"mean\": 2.0490317121815185, \"std\": 1.8569641171328626, \"min\": 0.0, \"25%\": 0.0, \"50%\": 3.0, \"75%\": 3.0, \"max\": 14.0}, \"mqtt.proto_len\": {\"count\": 231646.0, \"mean\": 0.0382825518247671, \"std\": 0.3894423041397213, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 4.0}, \"mqtt.qos\": {\"count\": 231646.0, \"mean\": 0.1629900796905623, \"std\": 0.3693574184310316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"mqtt.retain\": {\"count\": 231646.0, \"mean\": 0.00037557307270576657, \"std\": 0.019376109988701633, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"mqtt.sub.qos\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 
0.0, \"max\": 0.0}, \"mqtt.suback.qos\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.0}, \"mqtt.ver\": {\"count\": 231646.0, \"mean\": 0.0382825518247671, \"std\": 0.3894423041397213, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 4.0}, \"mqtt.willmsg\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.0}, \"mqtt.willmsg_len\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.0}, \"mqtt.willtopic\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.0}, \"mqtt.willtopic_len\": {\"count\": 231646.0, \"mean\": 0.0, \"std\": 0.0, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.0}}", "examples": "{\"tcp.flags\":{\"0\":\"0x00000018\",\"1\":\"0x00000010\",\"2\":\"0x00000010\",\"3\":\"0x00000018\"},\"tcp.time_delta\":{\"0\":0.998867,\"1\":0.000067,\"2\":0.000058,\"3\":0.000227},\"tcp.len\":{\"0\":10,\"1\":1460,\"2\":1460,\"3\":10},\"mqtt.conack.flags\":{\"0\":\"0\",\"1\":\"0\",\"2\":\"0\",\"3\":\"0\"},\"mqtt.conack.flags.reserved\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.conack.flags.sp\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.conack.val\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.conflag.cleansess\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.conflag.passwd\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.conflag.qos\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.conflag.reserved\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.conflag.retain\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.conflag.uname\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.conflag.willflag\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.conflags\":{\"0\":\"0\",\"1\":\"0\",\"2\":\"0\",\"3\":\"0\"},\"mqtt.dupflag\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.hdrflags\":{\"0\":\"0x00000030\",\"1\":\"0x00000032\",\"2\":\"0x00000032\",\"3\":\"0x00000030\"},\"mqtt.kalive\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.len\":{\"0\":8.0,\"1\":169.0,\"2\":163.0,\"3\":8.0},\"mqtt.msg\":{\"0\":\"32\",\"1\":\"63616539436661446542664541624446343262306330416366456435634239346437304441373463314639303642313246323430466537444238333743644242443841613435344442436545313436314634454166366136304166306632366141386161\",\"2\":\"42326461413943334633346132326264463266463834464137316661343864343545446144353841304633314536463531353765636439666631313366634430313538443065414565343531363442623632333330666237446645396644\",\"3\":\"32\"},\"mqtt.msgid\":{\"0\":0.0,\"1\":2714.0,\"2\":1548.0,\"3\":0.0},\"mqtt.msgtype\":{\"0\":3.0,\"1\":3.0,\"2\":3.0,\"3\":3.0},\"mqtt.proto_len\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.protoname\":{\"0\":\"0\",\"1\":\"0\",\"2\":\"0\",\"3\":\"0\"},\"mqtt.qos\":{\"0\":0.0,\"1\":1.0,\"2\":1.0,\"3\":0.0},\"mqtt.retain\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.sub.qos\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.suback.qos\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.ver\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.willmsg\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.willmsg_len\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.willtopic\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"mqtt.willtopic_len\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"target\":{\"0\":\"legitimate\",\"1\":\"dos\",\"2\":\"dos\",\"3\":\"legitim
ate\"}}"}}]
true
2
<start_data_description><data_path>mqttset/Data/FINAL_CSV/train70_reduced.csv: <column_names> ['tcp.flags', 'tcp.time_delta', 'tcp.len', 'mqtt.conack.flags', 'mqtt.conack.flags.reserved', 'mqtt.conack.flags.sp', 'mqtt.conack.val', 'mqtt.conflag.cleansess', 'mqtt.conflag.passwd', 'mqtt.conflag.qos', 'mqtt.conflag.reserved', 'mqtt.conflag.retain', 'mqtt.conflag.uname', 'mqtt.conflag.willflag', 'mqtt.conflags', 'mqtt.dupflag', 'mqtt.hdrflags', 'mqtt.kalive', 'mqtt.len', 'mqtt.msg', 'mqtt.msgid', 'mqtt.msgtype', 'mqtt.proto_len', 'mqtt.protoname', 'mqtt.qos', 'mqtt.retain', 'mqtt.sub.qos', 'mqtt.suback.qos', 'mqtt.ver', 'mqtt.willmsg', 'mqtt.willmsg_len', 'mqtt.willtopic', 'mqtt.willtopic_len', 'target'] <column_types> {'tcp.flags': 'object', 'tcp.time_delta': 'float64', 'tcp.len': 'int64', 'mqtt.conack.flags': 'object', 'mqtt.conack.flags.reserved': 'float64', 'mqtt.conack.flags.sp': 'float64', 'mqtt.conack.val': 'float64', 'mqtt.conflag.cleansess': 'float64', 'mqtt.conflag.passwd': 'float64', 'mqtt.conflag.qos': 'float64', 'mqtt.conflag.reserved': 'float64', 'mqtt.conflag.retain': 'float64', 'mqtt.conflag.uname': 'float64', 'mqtt.conflag.willflag': 'float64', 'mqtt.conflags': 'object', 'mqtt.dupflag': 'float64', 'mqtt.hdrflags': 'object', 'mqtt.kalive': 'float64', 'mqtt.len': 'float64', 'mqtt.msg': 'object', 'mqtt.msgid': 'float64', 'mqtt.msgtype': 'float64', 'mqtt.proto_len': 'float64', 'mqtt.protoname': 'object', 'mqtt.qos': 'float64', 'mqtt.retain': 'float64', 'mqtt.sub.qos': 'float64', 'mqtt.suback.qos': 'float64', 'mqtt.ver': 'float64', 'mqtt.willmsg': 'float64', 'mqtt.willmsg_len': 'float64', 'mqtt.willtopic': 'float64', 'mqtt.willtopic_len': 'float64', 'target': 'object'} <dataframe_Summary> {'tcp.time_delta': {'count': 231646.0, 'mean': 0.2788670366982378, 'std': 2.900768079075393, 'min': -2e-06, '25%': 3e-06, '50%': 3e-05, '75%': 0.000197, 'max': 60.000878}, 'tcp.len': {'count': 231646.0, 'mean': 149.96288301978018, 'std': 947.5215046201207, 'min': 0.0, '25%': 0.0, '50%': 10.0, '75%': 14.0, 'max': 32768.0}, 'mqtt.conack.flags.reserved': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}, 'mqtt.conack.flags.sp': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}, 'mqtt.conack.val': {'count': 231646.0, 'mean': 0.02182209060376609, 'std': 0.3295978130224612, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 5.0}, 'mqtt.conflag.cleansess': {'count': 231646.0, 'mean': 0.009570637956191775, 'std': 0.09736057603493033, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'mqtt.conflag.passwd': {'count': 231646.0, 'mean': 0.004329882665791768, 'std': 0.06565937399067799, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'mqtt.conflag.qos': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}, 'mqtt.conflag.reserved': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}, 'mqtt.conflag.retain': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}, 'mqtt.conflag.uname': {'count': 231646.0, 'mean': 0.004347150393272494, 'std': 0.065789599190019, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'mqtt.conflag.willflag': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}, 'mqtt.dupflag': {'count': 231646.0, 'mean': 0.05241618676774043, 'std': 
0.2228653058461982, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'mqtt.kalive': {'count': 231646.0, 'mean': 175.53527796724313, 'std': 3383.1709252659853, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 65535.0}, 'mqtt.len': {'count': 231646.0, 'mean': 31.435725201384873, 'std': 61.311029572011364, 'min': 0.0, '25%': 0.0, '50%': 2.0, '75%': 11.0, 'max': 692.0}, 'mqtt.msgid': {'count': 231646.0, 'mean': 1162.9718147518197, 'std': 2276.028665265134, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1025.0, 'max': 9994.0}, 'mqtt.msgtype': {'count': 231646.0, 'mean': 2.0490317121815185, 'std': 1.8569641171328626, 'min': 0.0, '25%': 0.0, '50%': 3.0, '75%': 3.0, 'max': 14.0}, 'mqtt.proto_len': {'count': 231646.0, 'mean': 0.0382825518247671, 'std': 0.3894423041397213, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 4.0}, 'mqtt.qos': {'count': 231646.0, 'mean': 0.1629900796905623, 'std': 0.3693574184310316, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'mqtt.retain': {'count': 231646.0, 'mean': 0.00037557307270576657, 'std': 0.019376109988701633, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'mqtt.sub.qos': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}, 'mqtt.suback.qos': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}, 'mqtt.ver': {'count': 231646.0, 'mean': 0.0382825518247671, 'std': 0.3894423041397213, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 4.0}, 'mqtt.willmsg': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}, 'mqtt.willmsg_len': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}, 'mqtt.willtopic': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}, 'mqtt.willtopic_len': {'count': 231646.0, 'mean': 0.0, 'std': 0.0, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.0}} <dataframe_info> RangeIndex: 231646 entries, 0 to 231645 Data columns (total 34 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 tcp.flags 231646 non-null object 1 tcp.time_delta 231646 non-null float64 2 tcp.len 231646 non-null int64 3 mqtt.conack.flags 231646 non-null object 4 mqtt.conack.flags.reserved 231646 non-null float64 5 mqtt.conack.flags.sp 231646 non-null float64 6 mqtt.conack.val 231646 non-null float64 7 mqtt.conflag.cleansess 231646 non-null float64 8 mqtt.conflag.passwd 231646 non-null float64 9 mqtt.conflag.qos 231646 non-null float64 10 mqtt.conflag.reserved 231646 non-null float64 11 mqtt.conflag.retain 231646 non-null float64 12 mqtt.conflag.uname 231646 non-null float64 13 mqtt.conflag.willflag 231646 non-null float64 14 mqtt.conflags 231646 non-null object 15 mqtt.dupflag 231646 non-null float64 16 mqtt.hdrflags 231646 non-null object 17 mqtt.kalive 231646 non-null float64 18 mqtt.len 231646 non-null float64 19 mqtt.msg 231646 non-null object 20 mqtt.msgid 231646 non-null float64 21 mqtt.msgtype 231646 non-null float64 22 mqtt.proto_len 231646 non-null float64 23 mqtt.protoname 231646 non-null object 24 mqtt.qos 231646 non-null float64 25 mqtt.retain 231646 non-null float64 26 mqtt.sub.qos 231646 non-null float64 27 mqtt.suback.qos 231646 non-null float64 28 mqtt.ver 231646 non-null float64 29 mqtt.willmsg 231646 non-null float64 30 mqtt.willmsg_len 231646 non-null float64 31 mqtt.willtopic 231646 non-null float64 32 
mqtt.willtopic_len 231646 non-null float64 33 target 231646 non-null object dtypes: float64(26), int64(1), object(7) memory usage: 60.1+ MB <some_examples> {'tcp.flags': {'0': '0x00000018', '1': '0x00000010', '2': '0x00000010', '3': '0x00000018'}, 'tcp.time_delta': {'0': 0.998867, '1': 6.7e-05, '2': 5.8e-05, '3': 0.000227}, 'tcp.len': {'0': 10, '1': 1460, '2': 1460, '3': 10}, 'mqtt.conack.flags': {'0': '0', '1': '0', '2': '0', '3': '0'}, 'mqtt.conack.flags.reserved': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.conack.flags.sp': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.conack.val': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.conflag.cleansess': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.conflag.passwd': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.conflag.qos': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.conflag.reserved': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.conflag.retain': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.conflag.uname': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.conflag.willflag': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.conflags': {'0': '0', '1': '0', '2': '0', '3': '0'}, 'mqtt.dupflag': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.hdrflags': {'0': '0x00000030', '1': '0x00000032', '2': '0x00000032', '3': '0x00000030'}, 'mqtt.kalive': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.len': {'0': 8.0, '1': 169.0, '2': 163.0, '3': 8.0}, 'mqtt.msg': {'0': '32', '1': '63616539436661446542664541624446343262306330416366456435634239346437304441373463314639303642313246323430466537444238333743644242443841613435344442436545313436314634454166366136304166306632366141386161', '2': '42326461413943334633346132326264463266463834464137316661343864343545446144353841304633314536463531353765636439666631313366634430313538443065414565343531363442623632333330666237446645396644', '3': '32'}, 'mqtt.msgid': {'0': 0.0, '1': 2714.0, '2': 1548.0, '3': 0.0}, 'mqtt.msgtype': {'0': 3.0, '1': 3.0, '2': 3.0, '3': 3.0}, 'mqtt.proto_len': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.protoname': {'0': '0', '1': '0', '2': '0', '3': '0'}, 'mqtt.qos': {'0': 0.0, '1': 1.0, '2': 1.0, '3': 0.0}, 'mqtt.retain': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.sub.qos': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.suback.qos': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.ver': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.willmsg': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.willmsg_len': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.willtopic': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'mqtt.willtopic_len': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'target': {'0': 'legitimate', '1': 'dos', '2': 'dos', '3': 'legitimate'}} <end_description>
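A minimal sketch (an illustration added here, not part of the original notebook) of how the file described above could be loaded and split into features and the target label; the path is the one listed in the description, and keeping only the numeric columns is an assumption made for brevity.

import pandas as pd

# Path as listed in the description above; adjust to the local copy of the dataset.
df = pd.read_csv("mqttset/Data/FINAL_CSV/train70_reduced.csv")

# 'target' holds string labels such as 'legitimate' and 'dos' (see the examples above).
y = df["target"]

# Keep the numeric features for a first pass; the object-typed columns
# (tcp.flags, mqtt.conack.flags, mqtt.conflags, mqtt.hdrflags, mqtt.msg, mqtt.protoname)
# would need their own encoding step before being fed to a model.
X = df.select_dtypes(include=["number"])

print(X.shape)
print(y.value_counts())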
4,999
1
8,702
4,999
69913845
# # Dataset Visualisation # This notebook is solely use to generate visualisations on the dataset. import os, collections, random, itertools import tqdm import numpy as np import pandas as pd import matplotlib.pyplot as plt for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # load data df = pd.read_csv("/kaggle/input/quora-question-pairs/train.csv.zip") df["question1"] = df["question1"].astype(str) # resolve nan df["question2"] = df["question2"].astype(str) df["qid1"] = df["qid1"] - 1 df["qid2"] = df["qid2"] - 1 maxidx = max(max(df["qid1"]), max(df["qid2"])) + 1 df.sample(10) # ### Indexing the questions # all questions are identified with its qid qid_to_question = {} for qid1, qid2, question1, question2 in zip( df["qid1"], df["qid2"], df["question1"], df["question2"] ): qid_to_question[qid1] = question1 qid_to_question[qid2] = question2 # ### Simple Analysis of the dataset print("Number of questions", len(qid_to_question)) print("Number of duplicate pairs", sum(df["is_duplicate"])) print( "Percentage of pairs that are duplicate {:.3f}%".format( sum(df["is_duplicate"]) / len(qid_to_question) * 100 ) ) qid_to_labelled_qids = collections.defaultdict(set) qid_to_duplicate_qids = collections.defaultdict(set) for qid1, qid2, is_duplicate in zip(df["qid1"], df["qid2"], df["is_duplicate"]): qid_to_labelled_qids[qid1].add(qid2) qid_to_labelled_qids[qid2].add(qid1) if is_duplicate: qid_to_duplicate_qids[qid1].add(qid2) qid_to_duplicate_qids[qid2].add(qid1) plt.figure(figsize=(14, 4)) plt.title("Number of labels for each question, and how many of which are duplicate") label_sizes = [len(qid_to_labelled_qids[qid]) for qid in qid_to_question] count, bins = np.histogram(label_sizes, bins=range(max(label_sizes) + 2)) count = count * bins[:-1] # convert number of groups to population plt.bar(bins[1:-1], count[1:], width=1, label="total") duplicate_sizes = [len(qid_to_duplicate_qids[qid]) for qid in qid_to_question] count, bins = np.histogram(duplicate_sizes, bins=range(max(duplicate_sizes) + 2)) count = count * bins[:-1] # convert number of groups to population plt.bar(bins[1:-1], count[1:], width=1, label="duplicate") plt.xlim(0, 50) plt.xlabel("Count") plt.ylabel("Dataset Frequency") plt.legend() plt.show() print("Largest label sizes:", sorted(label_sizes)[-20:]) print("Largest duplicate sizes:", sorted(duplicate_sizes)[-20:]) # ### Connecting similar questions import typing class DisjointSet: # https://github.com/not522/ac-library-python/blob/master/atcoder/dsu.py def __init__(self, n: int = 0) -> None: self._n = n self.parent_or_size = [-1] * n def union(self, a: int, b: int) -> int: assert 0 <= a < self._n assert 0 <= b < self._n x = self.leader(a) y = self.leader(b) if x == y: return x if -self.parent_or_size[x] < -self.parent_or_size[y]: x, y = y, x self.parent_or_size[x] += self.parent_or_size[y] self.parent_or_size[y] = x return x def same(self, a: int, b: int) -> bool: assert 0 <= a < self._n assert 0 <= b < self._n return self.leader(a) == self.leader(b) def find(self, a: int) -> int: return self.leader(a) def leader(self, a: int) -> int: assert 0 <= a < self._n parent = self.parent_or_size[a] while parent >= 0: if self.parent_or_size[parent] < 0: return parent self.parent_or_size[a], a, parent = ( self.parent_or_size[parent], self.parent_or_size[parent], self.parent_or_size[self.parent_or_size[parent]], ) return a def size(self, a: int) -> int: assert 0 <= a < self._n return -self.parent_or_size[self.leader(a)] def 
groups(self) -> typing.List[typing.List[int]]: leader_buf = [self.leader(i) for i in range(self._n)] result: typing.List[typing.List[int]] = [[] for _ in range(self._n)] for i in range(self._n): result[leader_buf[i]].append(i) return list(filter(lambda r: r, result)) # all questions are identified with its qid disjoint_set = DisjointSet(maxidx) for qid1, qid2, is_duplicate in zip(df["qid1"], df["qid2"], df["is_duplicate"]): if is_duplicate: disjoint_set.union(qid1, qid2) # ### Counting the number of inconsistent nonduplicate labels cnt = 0 for qid1, qid2, is_duplicate in zip(df["qid1"], df["qid2"], df["is_duplicate"]): if not is_duplicate: if disjoint_set.find(qid1) == disjoint_set.find(qid2): cnt += 1 if cnt < 10: print(qid_to_question[qid1], "\n", qid_to_question[qid2], "\n") print(cnt) # ### Visualising the group size of similar questions group_sizes = np.array([len(group) for group in disjoint_set.groups()]) count, bins = np.histogram(group_sizes, bins=range(max(group_sizes) + 2)) count = count * bins[:-1] # convert number of groups to population plt.figure(figsize=(14, 4)) plt.title("Group size of similar questions") plt.bar(bins[2:-1], count[2:], width=1) plt.xlim(0, 50) plt.xlabel("Group Size") plt.ylabel("Dataset Frequency") plt.show() print("Largest group sizes:", sorted(group_sizes)[-20:]) # ### Counting the number of augmented connections initial_connection_count = sum(duplicate_sizes) final_connection_count = sum(group_sizes) initial_connection_count, final_connection_count, final_connection_count - initial_connection_count # ### Visualise distribution of overlapping word count for duplicate pairs # define tokenisation process import pickle, functools qid_to_tokens_preprocessed_filename = "../input/quora-question-pairs-tokenise-pipeline/qid_to_processed_token_list_tokenise_then_spellcheck.pkl" with open(qid_to_tokens_preprocessed_filename, "rb") as f: qid_to_tokens_preprocessed = pickle.load(f) from nltk.corpus import stopwords from nltk.tokenize import word_tokenize stopword_set = set(stopwords.words()) stopword_set.update(["?"]) @functools.lru_cache(maxsize=None) def tokenise_qid(qid, qid_to_tokens_preprocessed=qid_to_tokens_preprocessed): if qid_to_tokens_preprocessed: return qid_to_tokens_preprocessed[qid] sentence = qid_to_question[qid] return word_tokenize(sentence.lower()) groups = disjoint_set.groups() overlap_count_duplicate = [] for group in tqdm.tqdm(groups): for qid1, qid2 in itertools.combinations(group, r=2): overlapping_tokens = set(tokenise_qid(qid1)) & set(tokenise_qid(qid2)) overlapping_tokens = list( token for token in overlapping_tokens if token not in stopword_set ) overlap_count_duplicate.append(len(overlapping_tokens)) overlap_count_random = [] sample1 = random.sample(qid_to_question.keys(), 20000) sample2 = random.sample(qid_to_question.keys(), 20000) for qid1, qid2 in zip(sample1, sample2): overlapping_tokens = set(tokenise_qid(qid1)) & set(tokenise_qid(qid2)) overlapping_tokens = list( token for token in overlapping_tokens if token not in stopword_set ) overlap_count_random.append(len(overlapping_tokens)) plt.figure(figsize=(14, 4)) plt.hist( overlap_count_duplicate, bins=range(15), density=True, alpha=0.5, label="duplicate pair", ) plt.hist( overlap_count_random, bins=range(15), density=True, alpha=0.5, label="random pair" ) plt.title( "Distribution of overlapping non-root word tokens for duplicate pairs and random pairs" ) plt.legend() plt.show() # ### Understand the most frequent non rootword tokens from nltk.corpus import stopwords stopword_set = 
set(stopwords.words()) import pickle import random from nltk.corpus import stopwords stopword_set = set(stopwords.words()) with open( "../input/quora-question-pairs-tokenise-pipeline/qid_to_processed_token_list_spellcheck_then_tokenise.pkl", "rb", ) as f: qid_to_tokens = pickle.load(f) # with open("../input/quora-question-pairs-tokenise-pipeline/token_to_qid_tokenise_then_spellcheck.pkl", "rb") as f: # token_to_qids = pickle.load(f) token_to_qids = collections.defaultdict(set) for qid, tokens in qid_to_tokens.items(): for token in tokens: token_to_qids[token].add(qid) # most common non-stop words, the question mark has been excluded sorted( [(len(v), k) for k, v in token_to_qids.items() if k not in stopword_set], reverse=True, )[:20] # ### Visualise distribution of the number of questions to compare against token_length_sizes = [] considered_set_sizes = [] for qid in tqdm.tqdm(random.sample(qid_to_tokens.keys(), 10000)): considered_set = set() for token in qid_to_tokens[qid]: if token in stopword_set: continue if ( token in token_to_qids ): # some tokens are not found in the token_to_qids (probably from test set) for considered_qid in token_to_qids[token]: considered_set.add(considered_qid) token_length_sizes.append(len(set(qid_to_tokens[qid]))) considered_set_sizes.append(len(considered_set)) plt.figure(figsize=(14, 4)) plt.hist(considered_set_sizes, bins=np.arange(0, 70000, 1000), density=True) plt.title("How many other questions has at least one common non-rootword token") plt.xlabel("Query comparison size") plt.legend() plt.show() plt.figure(figsize=(14, 4)) plt.scatter(considered_set_sizes, token_length_sizes, alpha=0.1) plt.title("Relationship between number of unique tokens and query comparison size") plt.ylabel("Number of unique tokens") plt.xlabel("Query comparison size") plt.xlim(None, 70000) plt.show() # ### Visualise distribution of sentence vectors # model_name = "bert-base-nli-stsb-mean-tokens" # sentence_vectors = np.load(f"../input/quora-question-pairs-bert-sentence-vectors/sentence_vectors_{model_name}.npy") with open( "../input/quora-question-pairs-tokenise-pipeline/qid_to_vec_trf.pkl", "rb" ) as f: qid_to_vec = pickle.load(f) sentence_vectors = [] for idx in sorted(qid_to_vec.keys()): sentence_vectors.append(qid_to_vec[idx]) sentence_vectors = np.array(sentence_vectors) largest_groups = sorted(disjoint_set.groups(), key=len)[-1:] qids_of_largest_groups = np.array(sum(largest_groups, [])) # flatten from sklearn.decomposition import PCA pca = PCA(n_components=2) sentence_pca = pca.fit_transform(sentence_vectors) from matplotlib import collections as mc def plot_2d_distribution(vectors_2d, qids_to_connect, title=""): qids_to_connect = set(qids_to_connect) plt.figure(figsize=(10, 8)) plt.scatter(*list(zip(*vectors_2d))[:2], s=1, alpha=0.1) lines = [] for qid1, qid2, is_duplicate in zip(df["qid1"], df["qid2"], df["is_duplicate"]): if is_duplicate and qid1 in qids_to_connect and qid2 in qids_to_connect: lines.append([vectors_2d[qid1][:2], vectors_2d[qid2][:2]]) lc = mc.LineCollection( lines, color="red", alpha=0.2, linewidths=[ 1 / (0.1 + (a - c) ** 2 + (b - d) ** 2) ** 0.5 for (a, b), (c, d) in lines ], ) plt.gca().add_collection(lc) plt.title(title) plt.show() plot_2d_distribution( sentence_pca, qids_of_largest_groups, "Plot of PCA projection of all sentence vecotrs, 5 largest groups highlighted", ) from MulticoreTSNE import MulticoreTSNE as TSNE # it was recommended by scipy that we first reduce the dimensions qids_to_fit_tsne = np.array( list( set(qids_of_largest_groups) | 
set(random.sample(qid_to_question.keys(), 20000)) ) ) sentence_pca = PCA(n_components=50).fit_transform(sentence_vectors[qids_to_fit_tsne]) tsne = TSNE(n_jobs=4) sentence_tsne = np.empty((sentence_vectors.shape[0], 2)) sentence_tsne[:] = np.nan sentence_tsne[qids_to_fit_tsne] = tsne.fit_transform(sentence_pca) plot_2d_distribution( sentence_tsne, qids_of_largest_groups, "Plot of T-SNE projection of 20 largest groups and 20000 other questions", ) # #### Duplicate questions with largest cosine distance of sentence vectors import itertools from scipy.spatial.distance import cosine distances = [] for group in tqdm.tqdm(disjoint_set.groups()): for qid1, qid2 in itertools.combinations(group, r=2): distance = cosine(sentence_vectors[qid1], sentence_vectors[qid2]) distances.append((distance, qid1, qid2)) distances = sorted(distances) for distance, qid1, qid2 in distances[-10:]: print( f"Distance: {distance:.2f}\n{qid_to_question[qid1]}\n{qid_to_question[qid2]}\n" )
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/913/69913845.ipynb
null
null
[{"Id": 69913845, "ScriptId": 17570628, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1680925, "CreationDate": "08/03/2021 20:20:46", "VersionNumber": 8.0, "Title": "quora-question-pair-dataset-visualisation", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 347.0, "LinesInsertedFromPrevious": 12.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 335.0, "LinesInsertedFromFork": 310.0, "LinesDeletedFromFork": 201.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 37.0, "TotalVotes": 0}]
null
null
null
null
false
0
4,115
0
4,115
4,115
69887953
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data train_data.info() train_data.shape train_data.describe() train_data.isnull().sum() test_data.isnull().sum() train_data.drop("Cabin", axis=1, inplace=True) test_data.drop("Cabin", axis=1, inplace=True) median_age = train_data["Age"].median() train_data["Age"].replace(np.nan, median_age, inplace=True) median_age median_age = test_data["Age"].median() test_data["Age"].replace(np.nan, median_age, inplace=True) freq_port = train_data.Embarked.dropna().mode()[0] train_data["Embarked"] = train_data["Embarked"].fillna(freq_port) freq_port train_data.isnull().sum() test_data.isnull().sum() median_fare = test_data["Fare"].median() test_data["Fare"].replace(np.nan, median_age, inplace=True) median_fare train_data.isnull().sum() test_data.isnull().sum() sns.countplot(x="Survived", hue="Sex", data=train_data) women = train_data.loc[train_data.Sex == "female"]["Survived"] rate_women = sum(women) / len(women) * 100 print(" % of women survivers : ", rate_women) men = train_data.loc[train_data.Sex == "male"]["Survived"] rate_men = sum(men) / len(men) * 100 print(" % of men survivers : ", rate_men) sns.countplot(x="Survived", hue="Pclass", data=train_data) class1 = train_data.loc[train_data.Pclass == 1]["Survived"] rate_class1 = sum(class1) / len(class1) * 100 print(" % of class1 survivers : ", rate_class1) class2 = train_data.loc[train_data.Pclass == 2]["Survived"] rate_class2 = sum(class2) / len(class2) * 100 print(" % of class2 survivers : ", rate_class2) class3 = train_data.loc[train_data.Pclass == 3]["Survived"] rate_class3 = sum(class3) / len(class3) * 100 print(" % of class3 survivers : ", rate_class3) train_data["Sex"] = train_data["Sex"].map({"female": 1, "male": 0}).astype(int) test_data["Sex"] = test_data["Sex"].map({"female": 1, "male": 0}).astype(int) emb_dummy = pd.get_dummies(train_data["Embarked"]) train_data = pd.concat([train_data, emb_dummy], axis=1) emb_dummy2 = pd.get_dummies(test_data["Embarked"]) test_data = pd.concat([test_data, emb_dummy2], axis=1) train_data.head() drop_cols = ["Name", "Ticket", "Fare", "Embarked"] train_data = train_data.drop(drop_cols, axis=1) train_data = train_data.drop(["PassengerId"], axis=1) test_data = test_data.drop(drop_cols, axis=1) train_data.head() test_data.head() train_data.loc[train_data["Age"] <= 16, "Age"] = 0 train_data.loc[(train_data["Age"] > 16) & (train_data["Age"] <= 36), "Age"] = 1 train_data.loc[(train_data["Age"] > 36) & (train_data["Age"] <= 50), "Age"] = 2 train_data.loc[(train_data["Age"] > 50) & (train_data["Age"] <= 64), "Age"] = 3 train_data.loc[train_data["Age"] > 64, "Age"] = 4 train_data.head() test_data.loc[test_data["Age"] <= 
16, "Age"] = 0 test_data.loc[(test_data["Age"] > 16) & (test_data["Age"] <= 36), "Age"] = 1 test_data.loc[(test_data["Age"] > 36) & (test_data["Age"] <= 50), "Age"] = 2 test_data.loc[(test_data["Age"] > 50) & (test_data["Age"] <= 64), "Age"] = 3 test_data.loc[test_data["Age"] > 64, "Age"] = 4 test_data.head() X_train = train_data.drop(["Survived"], axis=1).values Y_train = train_data["Survived"].values from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(X_train, Y_train, test_size=0.25) from sklearn.linear_model import LogisticRegression regressor = LogisticRegression() regressor.fit(x_train, y_train) y_pred = regressor.predict(x_test) from sklearn.metrics import accuracy_score, confusion_matrix acc = accuracy_score(y_test, y_pred) acc cm = confusion_matrix(y_test, y_pred) cm from sklearn.tree import DecisionTreeClassifier dt = DecisionTreeClassifier() dt.fit(x_train, y_train) y_pred1 = dt.predict(x_test) from sklearn.metrics import accuracy_score, confusion_matrix acc2 = accuracy_score(y_test, y_pred1) acc2 from sklearn.ensemble import RandomForestClassifier rc = RandomForestClassifier(max_depth=9, random_state=0) rc.fit(x_train, y_train) y_pred2 = rc.predict(x_test) acc3 = accuracy_score(y_test, y_pred2) acc3 test = test_data.drop(["PassengerId"], axis=1) final_pred = regressor.predict(test) test_data["Survived"] = final_pred test_data.drop( ["Pclass", "Age", "Sex", "SibSp", "Parch", "C", "Q", "S"], inplace=True, axis=1 ) test_data.to_csv("Submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/887/69887953.ipynb
null
null
[{"Id": 69887953, "ScriptId": 19105609, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5090853, "CreationDate": "08/03/2021 18:12:10", "VersionNumber": 1.0, "Title": "notebook4e90aea3d9", "EvaluationDate": NaN, "IsChange": true, "TotalLines": 176.0, "LinesInsertedFromPrevious": 176.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,908
0
1,908
1,908
69887508
### import pandas and numpy import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt # Data Visualization Library import seaborn as sns # Data Visualization Library # to present all column in results pd.set_option("display.max_columns", None) # read the train dataset df_train = pd.read_csv("/kaggle/input/titanic/train.csv") df_test = pd.read_csv("/kaggle/input/titanic/test.csv") # # Data Exploration df_train.head() df_train.info() df_test.info() # # EDA (Exploratry Data Analysis) # **Getting important information about numircal and Catagorial data and getting some insights from it** # getting statistics information from numircal data df_train.describe() # getting information for catagorial data print(df_train.describe(exclude=[np.number]).shape) df_train.describe(exclude=[np.number]) # ##### we can see from above scdule that the name is not important column because it like index that not have relation with data so I will drop it later # ##### Also we can note the number of male is more than females # Getting the distribution of passenger ages df_train.Age.hist(bins=100, rwidth=0.8, figsize=(14, 4)) plt.title("Age") plt.show() df_train.shape # Get information about different values in all features to study if there is a missing or unlogical values for col in df_train.select_dtypes(include=["object"]): print(f"For column {col}\n------------------\n") print(df_train[col].value_counts()) print("\n") df_train.isna().sum()[df_train.isna().sum() != 0] df_test.isna().sum()[df_train.isna().sum() != 0] # data is very low so it is not the best option to drop any of it and it is better to fill it with proper values df_train["Embarked"] = df_train["Embarked"].fillna("S") df_train["Cabin"] = df_train["Cabin"].fillna("others") df_test["Embarked"] = df_test["Embarked"].fillna("S") df_test["Cabin"] = df_test["Cabin"].fillna("others") # cat_cols["Embarked"].value_counts() sns.boxplot(x="Survived", y="Age", data=df_train) from sklearn.impute import SimpleImputer null_cols = df_train.columns[df_train.isna().any() == True].tolist() imputer = SimpleImputer(missing_values=np.NAN, strategy="mean") imputer = imputer.fit(df_train[null_cols]) clean_cols = imputer.transform(df_train[null_cols]) df_train[null_cols] = clean_cols df_train.head() # for test data null_cols_test = df_test.columns[df_test.isna().any() == True].tolist() imputer_test = SimpleImputer(missing_values=np.NAN, strategy="mean") imputer_test = imputer_test.fit(df_test[null_cols_test]) clean_cols_test = imputer_test.transform(df_test[null_cols_test]) df_test[null_cols_test] = clean_cols_test print(df_train.isna().sum()[df_train.isna().sum() != 0]) print(df_test.isna().sum()[df_train.isna().sum() != 0]) figure = plt.figure(figsize=(12, 12)) sns.heatmap(df_train.corr(), annot=True) plt.show() # #### We need to drop the name and passengerId column from our data because they are not related or affected on result df_train.drop(axis=1, columns=["Name", "PassengerId"], inplace=True) df_test.drop(axis=1, columns=["Name", "PassengerId"], inplace=True) # Encoding catagorial data in may model using label encoding method cat_cols = df_train.select_dtypes(include=["object"]) cols_names = cat_cols.columns print(cols_names) from sklearn.preprocessing import LabelEncoder labelEncoder = LabelEncoder() for col in cols_names: df_train[col] = labelEncoder.fit_transform(df_train[col]) df_test[col] = labelEncoder.fit_transform(df_test[col]) df_train.head() df_test.head() y = 
df_train.Survived x = df_train.drop(["Survived"], axis=1) print(x.info()) y.shape # split data to train and (validation or test) split from sklearn.model_selection import train_test_split x_train, x_val, y_train, y_val = train_test_split( x, y, train_size=0.8, test_size=0.2, random_state=0 ) y_val.shape # import RandomForestClassifier model from sklearn.ensemble import RandomForestClassifier random_forest = RandomForestClassifier(n_estimators=200, max_depth=7, random_state=40) random_forest.fit(x_train, y_train) y_pred = random_forest.predict(x_val) # Calculate the accuracy from sklearn.metrics import accuracy_score acc = accuracy_score(y_pred, y_val) print("Test accuracy is", acc) y_final_pred = random_forest.predict(df_test) submission = pd.read_csv("/kaggle/input/titanic/test.csv") submission_df = pd.DataFrame() submission_df["PassengerId"] = submission.PassengerId submission_df["Survived"] = y_final_pred # If you want to see all prdicted data to ensure there is no somthing wrong before submit uncomment it # pd.set_option("display.max_rows", None) # submission_df submission_df.to_csv("submission1.csv", index=False, header=True)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/887/69887508.ipynb
null
null
[{"Id": 69887508, "ScriptId": 19101518, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6022636, "CreationDate": "08/03/2021 18:10:36", "VersionNumber": 1.0, "Title": "30_Days_of_ML_(Day1)", "EvaluationDate": NaN, "IsChange": true, "TotalLines": 136.0, "LinesInsertedFromPrevious": 136.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,545
0
1,545
1,545
69549196
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import zipfile

# !wget 'https://anaconda.org/conda-forge/libjpeg-turbo/2.1.0/download/linux-64/libjpeg-turbo-2.1.0-h7f98852_0.tar.bz2' -q
# !wget 'https://anaconda.org/conda-forge/libgcc-ng/9.3.0/download/linux-64/libgcc-ng-9.3.0-h2828fa1_19.tar.bz2' -q
# !wget 'https://anaconda.org/conda-forge/gdcm/2.8.9/download/linux-64/gdcm-2.8.9-py37h500ead1_1.tar.bz2' -q
# !wget 'https://anaconda.org/conda-forge/conda/4.10.1/download/linux-64/conda-4.10.1-py37h89c1867_0.tar.bz2' -q
# !wget 'https://anaconda.org/conda-forge/certifi/2020.12.5/download/linux-64/certifi-2020.12.5-py37h89c1867_1.tar.bz2' -q
# !wget 'https://anaconda.org/conda-forge/openssl/1.1.1k/download/linux-64/openssl-1.1.1k-h7f98852_0.tar.bz2' -q
# !conda install 'libjpeg-turbo-2.1.0-h7f98852_0.tar.bz2' -c conda-forge -y
# !conda install 'libgcc-ng-9.3.0-h2828fa1_19.tar.bz2' -c conda-forge -y
# !conda install 'gdcm-2.8.9-py37h500ead1_1.tar.bz2' -c conda-forge -y
# !conda install 'conda-4.10.1-py37h89c1867_0.tar.bz2' -c conda-forge -y
# !conda install 'certifi-2020.12.5-py37h89c1867_1.tar.bz2' -c conda-forge -y
# !conda install 'openssl-1.1.1k-h7f98852_0.tar.bz2' -c conda-forge -y
# import gdcm
import pydicom
import cv2
from pydicom.pixel_data_handlers.util import apply_voi_lut


def read_xray(path, voi_lut=True, fix_monochrome=True):
    # Original from: https://www.kaggle.com/raddar/convert-dicom-to-np-array-the-correct-way
    dicom = pydicom.read_file(path)
    # VOI LUT (if available by DICOM device) is used to transform raw DICOM data to
    # "human-friendly" view
    if voi_lut:
        data = apply_voi_lut(dicom.pixel_array, dicom)
    else:
        data = dicom.pixel_array
    # depending on this value, X-ray may look inverted - fix that:
    if fix_monochrome and dicom.PhotometricInterpretation == "MONOCHROME1":
        data = np.amax(data) - data
    data = data - np.min(data)
    data = data / np.max(data)
    # data = (data * 255).astype(np.uint8)
    return data


dir_path_out = r"train_png"
os.makedirs(dir_path_out)
import tqdm

list_imgs = []
dir_path = r"../input/siim-covid19-detection/train"
for dirs, subdirs, filenames in tqdm.tqdm(os.walk(dir_path)):
    for i_file in filenames:
        path_full = os.path.join(dirs, i_file)
        list_imgs.append(path_full)
len(list_imgs)
zf = zipfile.ZipFile(os.path.join(dir_path_out, "train_1.zip"), mode="w")
# for dirs, subdirs, filenames in tqdm.tqdm(os.walk(dir_path)):
for idx, img_path in enumerate(list_imgs):
    # if idx < 2000:
    #     continue
    if idx > 100:
        break
    path_out = os.path.join(dir_path_out, os.path.relpath(img_path, dir_path))
    path_out = os.path.splitext(path_out)[0] + ".png"
    if not os.path.exists(path_out):
        if not os.path.exists(os.path.split(path_out)[0]):
            os.makedirs(os.path.split(path_out)[0])
        img = read_xray(img_path)
        if img is not None:
            # note: img is float in [0, 1] here; see the sketch after this script for an 8-bit conversion
            cv2.imwrite(path_out, img)
            zf.write(path_out)
# close the archive so its central directory is written and the zip is readable
zf.close()
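# A minimal sketch (an assumption about how one might do it, not the original author's code):
# because the `(data * 255).astype(np.uint8)` line in read_xray is commented out, the array
# handed to cv2.imwrite is float64 in [0, 1], which does not yield a usable 8-bit PNG. The
# helper below, reusing the read_xray, dir_path, dir_path_out and list_imgs objects defined
# above, converts to uint8 before writing and relies on a context manager so the zip archive
# is always finalized.
import os
import zipfile

import cv2
import numpy as np


def save_xray_png(img_path, dir_path, dir_path_out, zf):
    # mirror the input directory layout, convert to 8-bit and add the PNG to the archive
    path_out = os.path.join(dir_path_out, os.path.relpath(img_path, dir_path))
    path_out = os.path.splitext(path_out)[0] + ".png"
    os.makedirs(os.path.dirname(path_out), exist_ok=True)
    img = read_xray(img_path)  # float64 scaled to [0, 1]
    img_u8 = np.clip(img * 255.0, 0, 255).astype(np.uint8)
    cv2.imwrite(path_out, img_u8)
    zf.write(path_out)


# usage sketch:
# with zipfile.ZipFile(os.path.join(dir_path_out, "train_1.zip"), mode="w") as zf:
#     for img_path in list_imgs[:100]:
#         save_xray_png(img_path, dir_path, dir_path_out, zf)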
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/549/69549196.ipynb
null
null
[{"Id": 69549196, "ScriptId": 18233064, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 460512, "CreationDate": "08/01/2021 11:49:19", "VersionNumber": 3.0, "Title": "Data prepare", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 82.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 81.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
1,301
0
1,301
1,301
69549319
<jupyter_start><jupyter_text>NLP-Word2Vec-Embeddings(pretrained) ### Context ![word2vec][1] Word2vec is a group of related models that are used to produce word embeddings. These models are shallow, two-layer neural networks that are trained to reconstruct linguistic contexts of words. Word2vec takes as its input a large corpus of text and produces a vector space, typically of several hundred dimensions, with each unique word in the corpus being assigned a corresponding vector in the space. Word vectors are positioned in the vector space such that words that share common contexts in the corpus are located in close proximity to one another in the space. ### Content Existing Word2Vec Embeddings. GoogleNews-vectors-negative300.bin glove.6B.50d.txt glove.6B.100d.txt glove.6B.200d.txt glove.6B.300d.txt Kaggle dataset identifier: nlpword2vecembeddingspretrained <jupyter_script># After exploration in 2021_CommonLitReadability notebook, the best model is re-implemented here for clarity. random_state = 317817398 import re # regex from tqdm import tqdm import numpy as np import pandas as pd import tensorflow as tf from tensorflow.keras import layers, models, callbacks import keras_tuner as kt from tensorflow.keras.backend import stop_gradient from sklearn.preprocessing import QuantileTransformer from sklearn.linear_model import SGDRegressor from sklearn.pipeline import make_pipeline from sklearn.metrics import mean_squared_error import matplotlib.pyplot as plt import random as rd rd.seed(random_state) np.random.seed(random_state) tf.random.set_seed(random_state) # auxiliary data from public datasets # English word use frequency word_list = pd.read_csv( "/kaggle/input/english-word-frequency/unigram_freq.csv", dtype={"word": str, "count": int}, ) max_word_length = np.max(word_list.word.apply(lambda x: len(str(x)))) word_list.set_index("word", inplace=True) word_list["count"] /= word_list["count"].sum() # turn the word_list into a dictionary, a function to handle unknown words too word_list = word_list.to_dict()["count"] def word_freq(word): try: return word_list[word] except Exception as e: return 0.0 # import word embeddings from the word2vec Kaggle dataset embedding_dim = 300 # 50, 100, 200, or 300; see the dataset instructions word2vec = {} with open( "/kaggle/input/nlpword2vecembeddingspretrained/glove.6B.%id.txt" % embedding_dim, "r", ) as f: for line in tqdm(f, total=400000): fields = line.split() word2vec[fields[0]] = np.array(fields[1:]).astype(np.float) def word_vec(x): if x in word2vec.keys(): return word2vec[x] else: return np.zeros(embedding_dim) # data preprocessing difficult_punctuation = ";\"'‘“:-()[]+?!$&/" number_characters = "0123456789" # Let's set a constant length of each excerpt, for parsing with NN. If the text is shorter, we will fill it with empty words WORDS_PER_EXCERPT = 205 def count_repeated_characters(word): chars = np.array(list(word)) return np.sum(chars[1:] == chars[:-1]) # Does maximum distance between two word embeddings help at all? 
def max_distance(emb_matrix): max_dist = 0.0 n_vectors = emb_matrix.shape[0] for i_vec in range(n_vectors - 1): max_dist = max( max_dist, np.max( np.sum((emb_matrix[(i_vec + 1) :] - emb_matrix[i_vec]) ** 2, axis=1) ), ) return np.sqrt(max_dist) def process_data( csv_train="/kaggle/input/commonlitreadabilityprize/train.csv", csv_test="/kaggle/input/commonlitreadabilityprize/test.csv", ): data_train_val = pd.read_csv(csv_train) data_test = pd.read_csv(csv_test) datasets = [data_train_val, data_test] noTrain_columns = ["target", "standard_error", "excerpt", "id"] for data in datasets: # drop url_legal, unique -- they are not in the test dataset, and do not look useful anyways data.drop(["url_legal", "license"], axis=1, inplace=True) # Generate the most basic features data["no_lineBreaks"] = data["excerpt"].str.split("\n").transform(len) data["no_sentences"] = data["excerpt"].str.split(".").transform(len) data["no_words"] = data["excerpt"].str.split(" ").transform(len) data["no_characters"] = data["excerpt"].apply(len) # potentially useful mean ratios data["mean_sentences_per_lineBreak"] = ( data["no_sentences"] / data["no_lineBreaks"] ) data["mean_words_per_sentence"] = data["no_words"] / data["no_sentences"] data["mean_characters_per_word"] = data["no_characters"] / data["no_words"] # potentially useful min/max ratios sentences_per_lineBreak = data.excerpt.str.split("\n").transform( lambda x: [len(y.split(".")) for y in x] ) data["min_sentences_per_lineBreak"] = sentences_per_lineBreak.apply(min) data["max_sentences_per_lineBreak"] = sentences_per_lineBreak.apply(max) del sentences_per_lineBreak lineBreaks_per_sentence = data.excerpt.str.split(".").transform( lambda x: [len(y.split("\n")) for y in x] ) data["min_lineBreaks_per_sentence"] = lineBreaks_per_sentence.apply(min) data["max_lineBreaks_per_sentence"] = lineBreaks_per_sentence.apply(max) del lineBreaks_per_sentence words_per_sentence = data.excerpt.str.split(".").transform( lambda x: [len(y.split(" ")) for y in x] ) data["min_words_per_sentence"] = words_per_sentence.apply(min) data["max_words_per_sentence"] = words_per_sentence.apply(max) del words_per_sentence words_per_lineBreak = data.excerpt.str.split("\n").transform( lambda x: [len(y.split(" ")) for y in x] ) data["min_words_per_lineBreak"] = words_per_lineBreak.apply(min) data["max_words_per_lineBreak"] = words_per_lineBreak.apply(max) del words_per_lineBreak characters_per_word = data.excerpt.str.split(" ").transform( lambda x: [len(y) for y in x] ) data["min_characters_per_word"] = characters_per_word.apply(min) data["max_characters_per_word"] = characters_per_word.apply(max) del characters_per_word characters_per_sentence = data.excerpt.str.split(".").transform( lambda x: [len(y) for y in x] ) data["min_characters_per_sentence"] = characters_per_sentence.apply(min) data["max_characters_per_sentence"] = characters_per_sentence.apply(max) del characters_per_sentence characters_per_lineBreak = data.excerpt.str.split("\n").transform( lambda x: [len(y) for y in x] ) data["min_characters_per_lineBreak"] = characters_per_lineBreak.apply(min) data["max_characters_per_lineBreak"] = characters_per_lineBreak.apply(max) del characters_per_lineBreak # punctuation marks count in the text data["punctuation_count"] = data.excerpt.apply( lambda x: sum([c in difficult_punctuation for c in x]) ) data["punctuation_frequency"] = ( data["punctuation_count"] / data["no_characters"] ) # numbers might indicate a text of technical nature, thus more difficult data["number_count"] = 
data.excerpt.apply( lambda x: sum([c in number_characters for c in x]) ) data["number_frequency"] = data["number_count"] / data["no_characters"] # multiple letters might indicate colloquial speech, e.g., aahh, oooh, etc. data["multiple_count"] = data.excerpt.apply(count_repeated_characters) data["multiple_count_frequency"] = ( data["multiple_count"] / data["no_characters"] ) # create a word list for each excerpt, # remove punctuation and change to lowercase data["word_list"] = data.excerpt.apply( lambda x: re.findall("[a-zA-Z]+", x.lower()) ) if "word_list" not in noTrain_columns: noTrain_columns += [ "word_list", ] # now add a mean, min, max frequency of a word in a given excerpt data["word_frequencies"] = data.word_list.apply( lambda x: [word_freq(y) for y in x] ) if "word_frequencies" not in noTrain_columns: noTrain_columns += [ "word_frequencies", ] data["mean_word_frequecy"] = data.word_frequencies.apply(np.mean) data["median_word_frequecy"] = data.word_frequencies.apply(np.median) data["min_word_frequecy"] = data.word_frequencies.apply(np.min) data["max_word_frequecy"] = data.word_frequencies.apply(np.max) data["std_word_frequecy"] = data.word_frequencies.apply(np.std) # some words are not in the dictionary, may not be English, or could be made up (zigzzzz, huzzah) data["non_word_count"] = data.word_frequencies.apply( lambda x: np.sum(np.array(x) == 0) ) data["non_word_frequency"] = data["non_word_count"] / data["no_words"] # include word embedding data in our dataframe data["word_embeddings"] = data.word_list.apply( lambda x: np.array([word_vec(y) for y in x]) ) if "word_embeddings" not in noTrain_columns: noTrain_columns += [ "word_embeddings", ] # some useful statistics with word embeddings we get right away # the topic of the excerpt data[["mean_embedding%i" % i for i in range(embedding_dim)]] = pd.DataFrame( data.word_embeddings.apply(lambda x: np.mean(x, axis=0).tolist()).to_list() ) # the variety of topics touched upon by the excerpt data["stddev_embedding"] = data.word_embeddings.apply( lambda x: np.sum(np.std(x, axis=0)) ) # Does maximum distance between two word embeddings help at all? data["maxdist_embedding"] = data.word_embeddings.apply(max_distance) return data_train_val, data_test, noTrain_columns def augment_data(data, target="target", std="standard_error", samples_per_row=2): """This uses the standard_error column to generate copies of each row with target values drawn from a gaussian distribution. This way we can take the confidence measure into account. This should probably be done better than just copying the rows, but it should do for a test...""" # replicate each row res = ( pd.concat( [ data, ] * samples_per_row ) .sort_values("id") .reset_index(drop=True) ) # draw samples from a normal distribution for idd in tqdm(data.id.unique()): mask = res.id == idd mean, std = res[mask].target.iloc[0], res[mask].standard_error.iloc[0] res.loc[res.index[mask], "target"] = np.random.normal( mean, std, samples_per_row ) # shuffle the augmented dataframe res = res.sample(frac=1).reset_index(drop=True) return res def prepare_indices(data_train_val, val_frac=0.2): """prepare indices for validation where needed""" indices_all = data_train_val.index.to_list() indices_val = rd.sample(indices_all, int(val_frac * len(data_train_val))) indices_train = list(set(indices_all) - set(indices_val)) indices = [indices_train, indices_val] return indices # preprocess the data print("Pre-processing csv data.. 
", flush=True, end="") data_train_val, data_test, noTrain_columns = process_data() print("done", flush=True) # augment the training / validation data -- this implementation uses too much memory if False: print("Augmenting data.. ", flush=True) data_train_val = augment_data(data_train_val) print("done.", flush=True) # validation split print("Preparing validation split.. ", flush=True, end="") indices = prepare_indices(data_train_val) print("done.", flush=True) # tools for training NN models def split_data( df, target="target", test_size=0.2, pca=False, indices=None, augmented=True ): if pca: pca = PCA() pca_cols = pca.fit_transform(data[data.columns.difference(noTrain_columns)]) X = pd.DataFrame( data=pca_cols, columns=["PCA%i" % i for i in range(pca_cols.shape[1])] ) else: X = df[df.columns.difference(noTrain_columns)] if augmented: y = df[[target, "standard_error"]].to_numpy() else: y = df[target] if indices == None: X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=test_size, random_state=random_state ) else: train_indices, test_indices = indices X_train = X.iloc[train_indices] X_test = X.iloc[test_indices] if augmented: y_train = y[train_indices] y_test = y[test_indices] else: y_train = y.iloc[train_indices] y_test = y.iloc[test_indices] return X_train, X_test, y_train, y_test, pca def fit_from_hp_dict( build_model, hp_dict, nn_inputs, indices, y_train, y_test, early_stopping=True, validation=True, epochs=1024, ): """Using saved HyperParameter.values dict, build the tuned model, train it, and plot diagnostics.""" best_hyperparameters = kt.HyperParameters() best_hyperparameters.values = hp_dict best_model = build_model(best_hyperparameters) if early_stopping: stop_early = tf.keras.callbacks.EarlyStopping( monitor="val_loss", patience=16, restore_best_weights=True ) callbacks = [ stop_early, ] else: callbacks = [] if validation: validation_data = (nn_inputs[indices[1]], y_test) else: validation_data = None history = best_model.fit( nn_inputs[indices[0]], y_train, epochs=epochs, validation_data=validation_data, callbacks=callbacks, ) plt.clf() plt.plot( history.history["root_mean_squared_error"], label="root_mean_squared_error" ) plt.plot( history.history["val_root_mean_squared_error"], label="val_root_mean_squared_error", ) plt.xlabel("Epoch") plt.ylabel("RMSE") plt.legend(loc="lower right") plt.show() plt.close() return best_model def augmented_loss(loss_instance, y, y_val, sample_weight=None): """This uses the standard_error column to generate a different sample from the target distribution at every training step, to use for loss calculation.""" if y.shape == y_val.shape: y_sample = y else: y_sample = tf.random.normal( [ 1, ], y[:, 0], y[:, 1], y_val.dtype, ) return loss_instance(stop_gradient(y_sample), y_val, sample_weight) # word length and frequency CNN # To feed the model, use the first WORDS_PER_EXCERPT words of the excerpt. 
# If less is available, fill with empty def extract_len_freq(word_list, no_words=WORDS_PER_EXCERPT): # cut to the right length, normalize, extract word frequency res = [[len(x) * 1.0 / max_word_length, word_freq(x)] for x in word_list[:no_words]] if len(res) < no_words: res += [ [0, 0], ] * (no_words - len(res)) res = np.array(res, dtype=np.float) return res def nn_preprocess_len_freq(data): return np.vstack(data.word_list.apply(extract_len_freq).to_numpy()).reshape( (-1, WORDS_PER_EXCERPT, 2) ) def build_cnn_len_freq(hp, augmented=True): nn = models.Sequential() nn.add(layers.Input(shape=(WORDS_PER_EXCERPT, 2))) num_conv_blocks = hp.Int("conv_blocks", 1, 5) for i in range(num_conv_blocks): with hp.conditional_scope("conv_blocks", list(range(i + 1, 5 + 1))): # num > i nn.add( layers.Conv1D( hp.Int("filters_conv%i" % i, 2, 16, step=2), ( min( nn.output_shape[1], hp.Int("kernel_conv%i" % i, 2, 16, step=2), ), ), activation="relu", ) ) pooling_choice = hp.Choice("pooling%i" % i, ["avg", "max", "none"]) with hp.conditional_scope("pooling%i" % i, ["avg", "max"]): if pooling_choice == "max": nn.add( layers.MaxPooling1D( ( min( nn.output_shape[1], hp.Int("kernel_pool%i" % i, 2, 8, step=2), ), ) ) ) elif pooling_choice == "avg": nn.add( layers.AveragePooling1D( ( min( nn.output_shape[1], hp.Int("kernel_pool%i" % i, 2, 8, step=2), ), ) ) ) nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) nn.add(layers.Flatten()) num_dense_blocks = hp.Int("dense_blocks", 1, 3) for i in range(num_dense_blocks): with hp.conditional_scope("dense_blocks", list(range(i + 1, 3 + 1))): # num > i nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) if hp.Choice("batch_norm", [True, False]): nn.add(layers.BatchNormalization()) nn.add( layers.Dense(hp.Int("dense%i" % i, 4, 64, step=4), activation="relu") ) if hp.Choice("batch_norm_output", [True, False]): nn.add(layers.BatchNormalization()) nn.add(layers.Dense(1)) if augmented: loss = lambda y1, y2, w=None: augmented_loss( tf.keras.losses.MeanSquaredError(), y1, y2, w ) else: loss = tf.keras.losses.MeanSquaredError() nn.compile( optimizer=tf.keras.optimizers.Adam( hp.Float("learning_rate", 1e-4, 0.3, sampling="log") ), loss=loss, metrics=["RootMeanSquaredError"], ) return nn best_hyperparameters_len_freq_dict = { "conv_blocks": 2, "filters_conv0": 8, "kernel_conv0": 10, "pooling0": "none", "dropout": 0.2, "dense_blocks": 2, "batch_norm": 1, "dense0": 12, "batch_norm_output": 0, "learning_rate": 0.018757792810801824, "filters_conv1": 12, "kernel_conv1": 6, "pooling1": "avg", "kernel_pool1": 4, "dense1": 4, "tuner/epochs": 32, "tuner/initial_epoch": 11, "tuner/bracket": 1, "tuner/round": 1, "tuner/trial_id": "0011a1157813e370e78f8a237ca72049", } # fit the model X_train, X_test, y_train, y_test, pca = split_data( data_train_val, target="target", indices=indices, pca=False ) nn_inputs_len_freq = nn_preprocess_len_freq(data_train_val) best_model_len_freq = fit_from_hp_dict( build_cnn_len_freq, best_hyperparameters_len_freq_dict, nn_inputs_len_freq, indices, y_train, y_test, ) data_train_val["cnn_word_len+freq"] = best_model_len_freq.predict(nn_inputs_len_freq) del X_train, X_test, y_train, y_test, pca, nn_inputs_len_freq # apply it to the test data nn_inputs_len_freq = nn_preprocess_len_freq(data_test) data_test["cnn_word_len+freq"] = best_model_len_freq.predict(nn_inputs_len_freq) del nn_inputs_len_freq # embedding cnn # For now, let's use the first WORDS_PER_EXCERPT words again. We will expand it to include the entire excerpts later.. 
def extract_emb(emb_matrix, no_words=WORDS_PER_EXCERPT): # cut or pad to the right length return np.concatenate( [ emb_matrix[:no_words, :], np.zeros([max(0, no_words - emb_matrix.shape[0]), emb_matrix.shape[1]]), ] ) def nn_preprocess_emb(data): return np.vstack(data.word_embeddings.apply(extract_emb).to_numpy()).reshape( (-1, WORDS_PER_EXCERPT, embedding_dim) ) def build_cnn_emb(hp, augmented=True): nn = models.Sequential() nn.add(layers.Input(shape=(WORDS_PER_EXCERPT, embedding_dim))) num_conv_blocks = hp.Int("conv_blocks", 0, 5) for i in range(num_conv_blocks): with hp.conditional_scope("conv_blocks", list(range(i + 1, 5 + 1))): # num > i nn.add( layers.Conv1D( hp.Int("filters_conv%i" % i, 2, 16, step=2), ( min( nn.output_shape[1], hp.Int("kernel_conv%i" % i, 2, 16, step=2), ), ), activation="relu", ) ) pooling_choice = hp.Choice("pooling%i" % i, ["avg", "max", "none"]) with hp.conditional_scope("pooling%i" % i, ["avg", "max"]): if pooling_choice == "max": nn.add( layers.MaxPooling1D( ( min( nn.output_shape[1], hp.Int("kernel_pool%i" % i, 2, 8, step=2), ), ) ) ) elif pooling_choice == "avg": nn.add( layers.AveragePooling1D( ( min( nn.output_shape[1], hp.Int("kernel_pool%i" % i, 2, 8, step=2), ), ) ) ) nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) nn.add(layers.Flatten()) num_dense_blocks = hp.Int("dense_blocks", 1, 5) for i in range(num_dense_blocks): with hp.conditional_scope("dense_blocks", list(range(i + 1, 5 + 1))): # num > i nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) if hp.Choice("batch_norm", [True, False]): nn.add(layers.BatchNormalization()) nn.add( layers.Dense(hp.Int("dense%i" % i, 4, 64, step=4), activation="relu") ) if hp.Choice("batch_norm_output", [True, False]): nn.add(layers.BatchNormalization()) nn.add(layers.Dense(1)) if augmented: loss = lambda y1, y2, w=None: augmented_loss( tf.keras.losses.MeanSquaredError(), y1, y2, w ) else: loss = tf.keras.losses.MeanSquaredError() nn.compile( optimizer=tf.keras.optimizers.Adam( hp.Float("learning_rate", 1e-4, 0.3, sampling="log") ), loss=loss, metrics=["RootMeanSquaredError"], ) return nn best_hyperparameters_emb_dict = { "conv_blocks": 1, "dense_blocks": 2, "dropout": 0.30000000000000004, "batch_norm": 0, "dense0": 48, "batch_norm_output": 1, "learning_rate": 0.002693667798794543, "dense1": 12, "filters_conv0": 10, "kernel_conv0": 4, "pooling0": "max", "kernel_pool0": 6, "tuner/epochs": 43, "tuner/initial_epoch": 15, "tuner/bracket": 4, "tuner/round": 3, "tuner/trial_id": "6ef193d541fe31f3ba90e45aedbaafdf", } # fit the model X_train, X_test, y_train, y_test, pca = split_data( data_train_val, target="target", indices=indices, pca=False ) nn_inputs_emb = nn_preprocess_emb(data_train_val) best_model_emb = fit_from_hp_dict( build_cnn_emb, best_hyperparameters_emb_dict, nn_inputs_emb, indices, y_train, y_test, ) data_train_val["cnn_word_embeddings"] = best_model_emb.predict(nn_inputs_emb) del X_train, X_test, y_train, y_test, pca, nn_inputs_emb # apply it to the test data nn_inputs_emb = nn_preprocess_emb(data_test) data_test["cnn_word_embeddings"] = best_model_emb.predict(nn_inputs_emb) del nn_inputs_emb # embedding lstm def extract_emb(emb_matrix, no_words=WORDS_PER_EXCERPT): # cut or pad to the right length return np.concatenate( [ emb_matrix[:no_words, :], np.zeros([max(0, no_words - emb_matrix.shape[0]), emb_matrix.shape[1]]), ] ) def nn_preprocess_emb(data): return np.vstack(data.word_embeddings.apply(extract_emb).to_numpy()).reshape( (-1, WORDS_PER_EXCERPT, embedding_dim) ) def 
build_lstm_emb(hp, augmented=True): nn = models.Sequential() nn.add(layers.Input(shape=(WORDS_PER_EXCERPT, embedding_dim))) num_conv_blocks = hp.Int("lstm_blocks", 1, 1) for i in range(num_conv_blocks): with hp.conditional_scope("lstm_blocks", list(range(i + 1, 5 + 1))): # num > i nn.add(layers.LSTM(hp.Int("lstm_units%i" % i, 8, 128, sampling="log"))) nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) nn.add(layers.Flatten()) num_dense_blocks = hp.Int("dense_blocks", 1, 5) for i in range(num_dense_blocks): with hp.conditional_scope("dense_blocks", list(range(i + 1, 5 + 1))): # num > i nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) if hp.Choice("batch_norm", [True, False]): nn.add(layers.BatchNormalization()) nn.add( layers.Dense(hp.Int("dense%i" % i, 4, 64, step=4), activation="relu") ) if hp.Choice("batch_norm_output", [True, False]): nn.add(layers.BatchNormalization()) nn.add(layers.Dense(1)) if augmented: loss = lambda y1, y2, w=None: augmented_loss( tf.keras.losses.MeanSquaredError(), y1, y2, w ) else: loss = tf.keras.losses.MeanSquaredError() nn.compile( optimizer=tf.keras.optimizers.Adam( hp.Float("learning_rate", 1e-4, 0.3, sampling="log") ), loss=loss, metrics=["RootMeanSquaredError"], ) return nn best_hyperparameters_emb_lstm_dict = { "lstm_blocks": 1, "lstm_units0": 66, "dropout": 0.2, "dense_blocks": 3, "batch_norm": 1, "dense0": 20, "batch_norm_output": 1, "learning_rate": 0.0022843219066342054, "dense1": 60, "dense2": 56, "tuner/epochs": 43, "tuner/initial_epoch": 0, "tuner/bracket": 1, "tuner/round": 0, } # fit the model X_train, X_test, y_train, y_test, pca = split_data( data_train_val, target="target", indices=indices, pca=False ) nn_inputs_emb = nn_preprocess_emb(data_train_val) best_model_emb_lstm = fit_from_hp_dict( build_lstm_emb, best_hyperparameters_emb_lstm_dict, nn_inputs_emb, indices, y_train, y_test, ) data_train_val["lstm_word_embeddings"] = best_model_emb_lstm.predict(nn_inputs_emb) del X_train, X_test, y_train, y_test, pca, nn_inputs_emb # apply it to the test data nn_inputs_emb = nn_preprocess_emb(data_test) data_test["lstm_word_embeddings"] = best_model_emb_lstm.predict(nn_inputs_emb) del nn_inputs_emb # the final regressor scalers = { "QuantileTransf": QuantileTransformer(), } sgdr_best_hps = { "loss": "squared_epsilon_insensitive", "penalty": "elasticnet", "l1_ratio": 0.0, "alpha": 0.1, "epsilon": 0.001, "learning_rate": "invscaling", "eta0": 0.01, "power_t": 0.25, } sgdr_best_pca = False sgdr_best_scaler = "QuantileTransf" # first let's see how it does def try_SGDR_opt(data, indices=None, output=False): X_train, X_test, y_train, y_test, pca = split_data( data, pca=sgdr_best_pca, indices=indices, augmented=False ) reg = make_pipeline( scalers[sgdr_best_scaler], SGDRegressor(**sgdr_best_hps, random_state=random_state), ) reg.fit(X_train, y_train) print("RMSE: ", mean_squared_error(y_test, reg.predict(X_test), squared=False)) if output: return reg print("rmse from validation:") try_SGDR_opt(data_train_val, indices=indices) # now retrain on full training data X = data_train_val[data_train_val.columns.difference(noTrain_columns)] y = data_train_val["target"] reg = make_pipeline( scalers[sgdr_best_scaler], SGDRegressor(**sgdr_best_hps, random_state=random_state) ) reg.fit(X, y) print("rmse from full data:") print("RMSE: ", mean_squared_error(y, reg.predict(X), squared=False)) del X, y # finally, generate predictions for the test dataset X = data_test[data_test.columns.difference(noTrain_columns)] data_test["target"] = reg.predict(X) 
# save the submission data_test[["id", "target"]].to_csv("submission.csv", index=False)
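# A toy illustration (an assumption for demonstration, not the author's exact training code)
# of the idea behind `augmented_loss` above: each excerpt's label is treated as the
# distribution N(target, standard_error) rather than a point estimate, and a fresh target is
# sampled at every loss evaluation, so the network never overfits a single noisy label.
# The values below are made up.
import tensorflow as tf

# y packs (target mean, standard error) per example, as in split_data(augmented=True)
y_packed = tf.constant([[-1.2, 0.45], [0.3, 0.50]], dtype=tf.float32)
y_hat = tf.constant([[-1.0], [0.1]], dtype=tf.float32)

mse = tf.keras.losses.MeanSquaredError()


def sampled_mse(y_true_packed, y_pred):
    # draw one sample per example from N(mean, std); stop_gradient keeps the sampling
    # step out of the backward pass, mirroring the notebook's loss
    means, stds = y_true_packed[:, 0], y_true_packed[:, 1]
    y_sample = means + stds * tf.random.normal(tf.shape(means))
    return mse(tf.stop_gradient(y_sample)[:, None], y_pred)


# two evaluations differ slightly because the targets are re-sampled on every call
print(float(sampled_mse(y_packed, y_hat)), float(sampled_mse(y_packed, y_hat)))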
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/549/69549319.ipynb
nlpword2vecembeddingspretrained
pkugoodspeed
[{"Id": 69549319, "ScriptId": 18926587, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1657309, "CreationDate": "08/01/2021 11:51:22", "VersionNumber": 11.0, "Title": "2021_CommonLitReadability_final", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 584.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 583.0, "LinesInsertedFromFork": 424.0, "LinesDeletedFromFork": 975.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 160.0, "TotalVotes": 0}]
[{"Id": 92863873, "KernelVersionId": 69549319, "SourceDatasetVersionId": 16023}, {"Id": 92863872, "KernelVersionId": 69549319, "SourceDatasetVersionId": 3976}]
[{"Id": 16023, "DatasetId": 11594, "DatasourceVersionId": 16023, "CreatorUserId": 994703, "LicenseName": "CC0: Public Domain", "CreationDate": "02/01/2018 17:14:25", "VersionNumber": 1.0, "Title": "NLP-Word2Vec-Embeddings(pretrained)", "Slug": "nlpword2vecembeddingspretrained", "Subtitle": "Existing word2vec embeddings including glove and google news", "Description": "### Context\n![word2vec][1]\nWord2vec is a group of related models that are used to produce word embeddings. These models are shallow, two-layer neural networks that are trained to reconstruct linguistic contexts of words. Word2vec takes as its input a large corpus of text and produces a vector space, typically of several hundred dimensions, with each unique word in the corpus being assigned a corresponding vector in the space. Word vectors are positioned in the vector space such that words that share common contexts in the corpus are located in close proximity to one another in the space.\n### Content\n\nExisting Word2Vec Embeddings. \nGoogleNews-vectors-negative300.bin\nglove.6B.50d.txt\nglove.6B.100d.txt\nglove.6B.200d.txt\nglove.6B.300d.txt\n\n\n### Acknowledgements\n\nWe wouldn't be here without the help of others. If you owe any attributions or thanks, include them here along with any citations of past research.\n\n\n### Inspiration\n\nYour data will be in front of the world's largest data science community. What questions do you want to see answered?\n\n\n [1]: https://www.adityathakker.com/wp-content/uploads/2017/06/word-embeddings-994x675.png", "VersionNotes": "Initial release", "TotalCompressedBytes": 2645946569.0, "TotalUncompressedBytes": 2645946569.0}]
[{"Id": 11594, "CreatorUserId": 994703, "OwnerUserId": 994703.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 16023.0, "CurrentDatasourceVersionId": 16023.0, "ForumId": 19007, "Type": 2, "CreationDate": "02/01/2018 17:14:25", "LastActivityDate": "02/01/2018", "TotalViews": 14986, "TotalDownloads": 1099, "TotalVotes": 26, "TotalKernels": 62}]
[{"Id": 994703, "UserName": "pkugoodspeed", "DisplayName": "pkugoodspeed", "RegisterDate": "03/29/2017", "PerformanceTier": 2}]
# After exploration in 2021_CommonLitReadability notebook, the best model is re-implemented here for clarity. random_state = 317817398 import re # regex from tqdm import tqdm import numpy as np import pandas as pd import tensorflow as tf from tensorflow.keras import layers, models, callbacks import keras_tuner as kt from tensorflow.keras.backend import stop_gradient from sklearn.preprocessing import QuantileTransformer from sklearn.linear_model import SGDRegressor from sklearn.pipeline import make_pipeline from sklearn.metrics import mean_squared_error import matplotlib.pyplot as plt import random as rd rd.seed(random_state) np.random.seed(random_state) tf.random.set_seed(random_state) # auxiliary data from public datasets # English word use frequency word_list = pd.read_csv( "/kaggle/input/english-word-frequency/unigram_freq.csv", dtype={"word": str, "count": int}, ) max_word_length = np.max(word_list.word.apply(lambda x: len(str(x)))) word_list.set_index("word", inplace=True) word_list["count"] /= word_list["count"].sum() # turn the word_list into a dictionary, a function to handle unknown words too word_list = word_list.to_dict()["count"] def word_freq(word): try: return word_list[word] except Exception as e: return 0.0 # import word embeddings from the word2vec Kaggle dataset embedding_dim = 300 # 50, 100, 200, or 300; see the dataset instructions word2vec = {} with open( "/kaggle/input/nlpword2vecembeddingspretrained/glove.6B.%id.txt" % embedding_dim, "r", ) as f: for line in tqdm(f, total=400000): fields = line.split() word2vec[fields[0]] = np.array(fields[1:]).astype(np.float) def word_vec(x): if x in word2vec.keys(): return word2vec[x] else: return np.zeros(embedding_dim) # data preprocessing difficult_punctuation = ";\"'‘“:-()[]+?!$&/" number_characters = "0123456789" # Let's set a constant length of each excerpt, for parsing with NN. If the text is shorter, we will fill it with empty words WORDS_PER_EXCERPT = 205 def count_repeated_characters(word): chars = np.array(list(word)) return np.sum(chars[1:] == chars[:-1]) # Does maximum distance between two word embeddings help at all? 
def max_distance(emb_matrix): max_dist = 0.0 n_vectors = emb_matrix.shape[0] for i_vec in range(n_vectors - 1): max_dist = max( max_dist, np.max( np.sum((emb_matrix[(i_vec + 1) :] - emb_matrix[i_vec]) ** 2, axis=1) ), ) return np.sqrt(max_dist) def process_data( csv_train="/kaggle/input/commonlitreadabilityprize/train.csv", csv_test="/kaggle/input/commonlitreadabilityprize/test.csv", ): data_train_val = pd.read_csv(csv_train) data_test = pd.read_csv(csv_test) datasets = [data_train_val, data_test] noTrain_columns = ["target", "standard_error", "excerpt", "id"] for data in datasets: # drop url_legal, unique -- they are not in the test dataset, and do not look useful anyways data.drop(["url_legal", "license"], axis=1, inplace=True) # Generate the most basic features data["no_lineBreaks"] = data["excerpt"].str.split("\n").transform(len) data["no_sentences"] = data["excerpt"].str.split(".").transform(len) data["no_words"] = data["excerpt"].str.split(" ").transform(len) data["no_characters"] = data["excerpt"].apply(len) # potentially useful mean ratios data["mean_sentences_per_lineBreak"] = ( data["no_sentences"] / data["no_lineBreaks"] ) data["mean_words_per_sentence"] = data["no_words"] / data["no_sentences"] data["mean_characters_per_word"] = data["no_characters"] / data["no_words"] # potentially useful min/max ratios sentences_per_lineBreak = data.excerpt.str.split("\n").transform( lambda x: [len(y.split(".")) for y in x] ) data["min_sentences_per_lineBreak"] = sentences_per_lineBreak.apply(min) data["max_sentences_per_lineBreak"] = sentences_per_lineBreak.apply(max) del sentences_per_lineBreak lineBreaks_per_sentence = data.excerpt.str.split(".").transform( lambda x: [len(y.split("\n")) for y in x] ) data["min_lineBreaks_per_sentence"] = lineBreaks_per_sentence.apply(min) data["max_lineBreaks_per_sentence"] = lineBreaks_per_sentence.apply(max) del lineBreaks_per_sentence words_per_sentence = data.excerpt.str.split(".").transform( lambda x: [len(y.split(" ")) for y in x] ) data["min_words_per_sentence"] = words_per_sentence.apply(min) data["max_words_per_sentence"] = words_per_sentence.apply(max) del words_per_sentence words_per_lineBreak = data.excerpt.str.split("\n").transform( lambda x: [len(y.split(" ")) for y in x] ) data["min_words_per_lineBreak"] = words_per_lineBreak.apply(min) data["max_words_per_lineBreak"] = words_per_lineBreak.apply(max) del words_per_lineBreak characters_per_word = data.excerpt.str.split(" ").transform( lambda x: [len(y) for y in x] ) data["min_characters_per_word"] = characters_per_word.apply(min) data["max_characters_per_word"] = characters_per_word.apply(max) del characters_per_word characters_per_sentence = data.excerpt.str.split(".").transform( lambda x: [len(y) for y in x] ) data["min_characters_per_sentence"] = characters_per_sentence.apply(min) data["max_characters_per_sentence"] = characters_per_sentence.apply(max) del characters_per_sentence characters_per_lineBreak = data.excerpt.str.split("\n").transform( lambda x: [len(y) for y in x] ) data["min_characters_per_lineBreak"] = characters_per_lineBreak.apply(min) data["max_characters_per_lineBreak"] = characters_per_lineBreak.apply(max) del characters_per_lineBreak # punctuation marks count in the text data["punctuation_count"] = data.excerpt.apply( lambda x: sum([c in difficult_punctuation for c in x]) ) data["punctuation_frequency"] = ( data["punctuation_count"] / data["no_characters"] ) # numbers might indicate a text of technical nature, thus more difficult data["number_count"] = 
data.excerpt.apply( lambda x: sum([c in number_characters for c in x]) ) data["number_frequency"] = data["number_count"] / data["no_characters"] # multiple letters might indicate colloquial speech, e.g., aahh, oooh, etc. data["multiple_count"] = data.excerpt.apply(count_repeated_characters) data["multiple_count_frequency"] = ( data["multiple_count"] / data["no_characters"] ) # create a word list for each excerpt, # remove punctuation and change to lowercase data["word_list"] = data.excerpt.apply( lambda x: re.findall("[a-zA-Z]+", x.lower()) ) if "word_list" not in noTrain_columns: noTrain_columns += [ "word_list", ] # now add a mean, min, max frequency of a word in a given excerpt data["word_frequencies"] = data.word_list.apply( lambda x: [word_freq(y) for y in x] ) if "word_frequencies" not in noTrain_columns: noTrain_columns += [ "word_frequencies", ] data["mean_word_frequecy"] = data.word_frequencies.apply(np.mean) data["median_word_frequecy"] = data.word_frequencies.apply(np.median) data["min_word_frequecy"] = data.word_frequencies.apply(np.min) data["max_word_frequecy"] = data.word_frequencies.apply(np.max) data["std_word_frequecy"] = data.word_frequencies.apply(np.std) # some words are not in the dictionary, may not be English, or could be made up (zigzzzz, huzzah) data["non_word_count"] = data.word_frequencies.apply( lambda x: np.sum(np.array(x) == 0) ) data["non_word_frequency"] = data["non_word_count"] / data["no_words"] # include word embedding data in our dataframe data["word_embeddings"] = data.word_list.apply( lambda x: np.array([word_vec(y) for y in x]) ) if "word_embeddings" not in noTrain_columns: noTrain_columns += [ "word_embeddings", ] # some useful statistics with word embeddings we get right away # the topic of the excerpt data[["mean_embedding%i" % i for i in range(embedding_dim)]] = pd.DataFrame( data.word_embeddings.apply(lambda x: np.mean(x, axis=0).tolist()).to_list() ) # the variety of topics touched upon by the excerpt data["stddev_embedding"] = data.word_embeddings.apply( lambda x: np.sum(np.std(x, axis=0)) ) # Does maximum distance between two word embeddings help at all? data["maxdist_embedding"] = data.word_embeddings.apply(max_distance) return data_train_val, data_test, noTrain_columns def augment_data(data, target="target", std="standard_error", samples_per_row=2): """This uses the standard_error column to generate copies of each row with target values drawn from a gaussian distribution. This way we can take the confidence measure into account. This should probably be done better than just copying the rows, but it should do for a test...""" # replicate each row res = ( pd.concat( [ data, ] * samples_per_row ) .sort_values("id") .reset_index(drop=True) ) # draw samples from a normal distribution for idd in tqdm(data.id.unique()): mask = res.id == idd mean, std = res[mask].target.iloc[0], res[mask].standard_error.iloc[0] res.loc[res.index[mask], "target"] = np.random.normal( mean, std, samples_per_row ) # shuffle the augmented dataframe res = res.sample(frac=1).reset_index(drop=True) return res def prepare_indices(data_train_val, val_frac=0.2): """prepare indices for validation where needed""" indices_all = data_train_val.index.to_list() indices_val = rd.sample(indices_all, int(val_frac * len(data_train_val))) indices_train = list(set(indices_all) - set(indices_val)) indices = [indices_train, indices_val] return indices # preprocess the data print("Pre-processing csv data.. 
", flush=True, end="") data_train_val, data_test, noTrain_columns = process_data() print("done", flush=True) # augment the training / validation data -- this implementation uses too much memory if False: print("Augmenting data.. ", flush=True) data_train_val = augment_data(data_train_val) print("done.", flush=True) # validation split print("Preparing validation split.. ", flush=True, end="") indices = prepare_indices(data_train_val) print("done.", flush=True) # tools for training NN models def split_data( df, target="target", test_size=0.2, pca=False, indices=None, augmented=True ): if pca: pca = PCA() pca_cols = pca.fit_transform(data[data.columns.difference(noTrain_columns)]) X = pd.DataFrame( data=pca_cols, columns=["PCA%i" % i for i in range(pca_cols.shape[1])] ) else: X = df[df.columns.difference(noTrain_columns)] if augmented: y = df[[target, "standard_error"]].to_numpy() else: y = df[target] if indices == None: X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=test_size, random_state=random_state ) else: train_indices, test_indices = indices X_train = X.iloc[train_indices] X_test = X.iloc[test_indices] if augmented: y_train = y[train_indices] y_test = y[test_indices] else: y_train = y.iloc[train_indices] y_test = y.iloc[test_indices] return X_train, X_test, y_train, y_test, pca def fit_from_hp_dict( build_model, hp_dict, nn_inputs, indices, y_train, y_test, early_stopping=True, validation=True, epochs=1024, ): """Using saved HyperParameter.values dict, build the tuned model, train it, and plot diagnostics.""" best_hyperparameters = kt.HyperParameters() best_hyperparameters.values = hp_dict best_model = build_model(best_hyperparameters) if early_stopping: stop_early = tf.keras.callbacks.EarlyStopping( monitor="val_loss", patience=16, restore_best_weights=True ) callbacks = [ stop_early, ] else: callbacks = [] if validation: validation_data = (nn_inputs[indices[1]], y_test) else: validation_data = None history = best_model.fit( nn_inputs[indices[0]], y_train, epochs=epochs, validation_data=validation_data, callbacks=callbacks, ) plt.clf() plt.plot( history.history["root_mean_squared_error"], label="root_mean_squared_error" ) plt.plot( history.history["val_root_mean_squared_error"], label="val_root_mean_squared_error", ) plt.xlabel("Epoch") plt.ylabel("RMSE") plt.legend(loc="lower right") plt.show() plt.close() return best_model def augmented_loss(loss_instance, y, y_val, sample_weight=None): """This uses the standard_error column to generate a different sample from the target distribution at every training step, to use for loss calculation.""" if y.shape == y_val.shape: y_sample = y else: y_sample = tf.random.normal( [ 1, ], y[:, 0], y[:, 1], y_val.dtype, ) return loss_instance(stop_gradient(y_sample), y_val, sample_weight) # word length and frequency CNN # To feed the model, use the first WORDS_PER_EXCERPT words of the excerpt. 
# If less is available, fill with empty def extract_len_freq(word_list, no_words=WORDS_PER_EXCERPT): # cut to the right length, normalize, extract word frequency res = [[len(x) * 1.0 / max_word_length, word_freq(x)] for x in word_list[:no_words]] if len(res) < no_words: res += [ [0, 0], ] * (no_words - len(res)) res = np.array(res, dtype=np.float) return res def nn_preprocess_len_freq(data): return np.vstack(data.word_list.apply(extract_len_freq).to_numpy()).reshape( (-1, WORDS_PER_EXCERPT, 2) ) def build_cnn_len_freq(hp, augmented=True): nn = models.Sequential() nn.add(layers.Input(shape=(WORDS_PER_EXCERPT, 2))) num_conv_blocks = hp.Int("conv_blocks", 1, 5) for i in range(num_conv_blocks): with hp.conditional_scope("conv_blocks", list(range(i + 1, 5 + 1))): # num > i nn.add( layers.Conv1D( hp.Int("filters_conv%i" % i, 2, 16, step=2), ( min( nn.output_shape[1], hp.Int("kernel_conv%i" % i, 2, 16, step=2), ), ), activation="relu", ) ) pooling_choice = hp.Choice("pooling%i" % i, ["avg", "max", "none"]) with hp.conditional_scope("pooling%i" % i, ["avg", "max"]): if pooling_choice == "max": nn.add( layers.MaxPooling1D( ( min( nn.output_shape[1], hp.Int("kernel_pool%i" % i, 2, 8, step=2), ), ) ) ) elif pooling_choice == "avg": nn.add( layers.AveragePooling1D( ( min( nn.output_shape[1], hp.Int("kernel_pool%i" % i, 2, 8, step=2), ), ) ) ) nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) nn.add(layers.Flatten()) num_dense_blocks = hp.Int("dense_blocks", 1, 3) for i in range(num_dense_blocks): with hp.conditional_scope("dense_blocks", list(range(i + 1, 3 + 1))): # num > i nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) if hp.Choice("batch_norm", [True, False]): nn.add(layers.BatchNormalization()) nn.add( layers.Dense(hp.Int("dense%i" % i, 4, 64, step=4), activation="relu") ) if hp.Choice("batch_norm_output", [True, False]): nn.add(layers.BatchNormalization()) nn.add(layers.Dense(1)) if augmented: loss = lambda y1, y2, w=None: augmented_loss( tf.keras.losses.MeanSquaredError(), y1, y2, w ) else: loss = tf.keras.losses.MeanSquaredError() nn.compile( optimizer=tf.keras.optimizers.Adam( hp.Float("learning_rate", 1e-4, 0.3, sampling="log") ), loss=loss, metrics=["RootMeanSquaredError"], ) return nn best_hyperparameters_len_freq_dict = { "conv_blocks": 2, "filters_conv0": 8, "kernel_conv0": 10, "pooling0": "none", "dropout": 0.2, "dense_blocks": 2, "batch_norm": 1, "dense0": 12, "batch_norm_output": 0, "learning_rate": 0.018757792810801824, "filters_conv1": 12, "kernel_conv1": 6, "pooling1": "avg", "kernel_pool1": 4, "dense1": 4, "tuner/epochs": 32, "tuner/initial_epoch": 11, "tuner/bracket": 1, "tuner/round": 1, "tuner/trial_id": "0011a1157813e370e78f8a237ca72049", } # fit the model X_train, X_test, y_train, y_test, pca = split_data( data_train_val, target="target", indices=indices, pca=False ) nn_inputs_len_freq = nn_preprocess_len_freq(data_train_val) best_model_len_freq = fit_from_hp_dict( build_cnn_len_freq, best_hyperparameters_len_freq_dict, nn_inputs_len_freq, indices, y_train, y_test, ) data_train_val["cnn_word_len+freq"] = best_model_len_freq.predict(nn_inputs_len_freq) del X_train, X_test, y_train, y_test, pca, nn_inputs_len_freq # apply it to the test data nn_inputs_len_freq = nn_preprocess_len_freq(data_test) data_test["cnn_word_len+freq"] = best_model_len_freq.predict(nn_inputs_len_freq) del nn_inputs_len_freq # embedding cnn # For now, let's use the first WORDS_PER_EXCERPT words again. We will expand it to include the entire excerpts later.. 
def extract_emb(emb_matrix, no_words=WORDS_PER_EXCERPT): # cut or pad to the right length return np.concatenate( [ emb_matrix[:no_words, :], np.zeros([max(0, no_words - emb_matrix.shape[0]), emb_matrix.shape[1]]), ] ) def nn_preprocess_emb(data): return np.vstack(data.word_embeddings.apply(extract_emb).to_numpy()).reshape( (-1, WORDS_PER_EXCERPT, embedding_dim) ) def build_cnn_emb(hp, augmented=True): nn = models.Sequential() nn.add(layers.Input(shape=(WORDS_PER_EXCERPT, embedding_dim))) num_conv_blocks = hp.Int("conv_blocks", 0, 5) for i in range(num_conv_blocks): with hp.conditional_scope("conv_blocks", list(range(i + 1, 5 + 1))): # num > i nn.add( layers.Conv1D( hp.Int("filters_conv%i" % i, 2, 16, step=2), ( min( nn.output_shape[1], hp.Int("kernel_conv%i" % i, 2, 16, step=2), ), ), activation="relu", ) ) pooling_choice = hp.Choice("pooling%i" % i, ["avg", "max", "none"]) with hp.conditional_scope("pooling%i" % i, ["avg", "max"]): if pooling_choice == "max": nn.add( layers.MaxPooling1D( ( min( nn.output_shape[1], hp.Int("kernel_pool%i" % i, 2, 8, step=2), ), ) ) ) elif pooling_choice == "avg": nn.add( layers.AveragePooling1D( ( min( nn.output_shape[1], hp.Int("kernel_pool%i" % i, 2, 8, step=2), ), ) ) ) nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) nn.add(layers.Flatten()) num_dense_blocks = hp.Int("dense_blocks", 1, 5) for i in range(num_dense_blocks): with hp.conditional_scope("dense_blocks", list(range(i + 1, 5 + 1))): # num > i nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) if hp.Choice("batch_norm", [True, False]): nn.add(layers.BatchNormalization()) nn.add( layers.Dense(hp.Int("dense%i" % i, 4, 64, step=4), activation="relu") ) if hp.Choice("batch_norm_output", [True, False]): nn.add(layers.BatchNormalization()) nn.add(layers.Dense(1)) if augmented: loss = lambda y1, y2, w=None: augmented_loss( tf.keras.losses.MeanSquaredError(), y1, y2, w ) else: loss = tf.keras.losses.MeanSquaredError() nn.compile( optimizer=tf.keras.optimizers.Adam( hp.Float("learning_rate", 1e-4, 0.3, sampling="log") ), loss=loss, metrics=["RootMeanSquaredError"], ) return nn best_hyperparameters_emb_dict = { "conv_blocks": 1, "dense_blocks": 2, "dropout": 0.30000000000000004, "batch_norm": 0, "dense0": 48, "batch_norm_output": 1, "learning_rate": 0.002693667798794543, "dense1": 12, "filters_conv0": 10, "kernel_conv0": 4, "pooling0": "max", "kernel_pool0": 6, "tuner/epochs": 43, "tuner/initial_epoch": 15, "tuner/bracket": 4, "tuner/round": 3, "tuner/trial_id": "6ef193d541fe31f3ba90e45aedbaafdf", } # fit the model X_train, X_test, y_train, y_test, pca = split_data( data_train_val, target="target", indices=indices, pca=False ) nn_inputs_emb = nn_preprocess_emb(data_train_val) best_model_emb = fit_from_hp_dict( build_cnn_emb, best_hyperparameters_emb_dict, nn_inputs_emb, indices, y_train, y_test, ) data_train_val["cnn_word_embeddings"] = best_model_emb.predict(nn_inputs_emb) del X_train, X_test, y_train, y_test, pca, nn_inputs_emb # apply it to the test data nn_inputs_emb = nn_preprocess_emb(data_test) data_test["cnn_word_embeddings"] = best_model_emb.predict(nn_inputs_emb) del nn_inputs_emb # embedding lstm def extract_emb(emb_matrix, no_words=WORDS_PER_EXCERPT): # cut or pad to the right length return np.concatenate( [ emb_matrix[:no_words, :], np.zeros([max(0, no_words - emb_matrix.shape[0]), emb_matrix.shape[1]]), ] ) def nn_preprocess_emb(data): return np.vstack(data.word_embeddings.apply(extract_emb).to_numpy()).reshape( (-1, WORDS_PER_EXCERPT, embedding_dim) ) def 
build_lstm_emb(hp, augmented=True): nn = models.Sequential() nn.add(layers.Input(shape=(WORDS_PER_EXCERPT, embedding_dim))) num_conv_blocks = hp.Int("lstm_blocks", 1, 1) for i in range(num_conv_blocks): with hp.conditional_scope("lstm_blocks", list(range(i + 1, 5 + 1))): # num > i nn.add(layers.LSTM(hp.Int("lstm_units%i" % i, 8, 128, sampling="log"))) nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) nn.add(layers.Flatten()) num_dense_blocks = hp.Int("dense_blocks", 1, 5) for i in range(num_dense_blocks): with hp.conditional_scope("dense_blocks", list(range(i + 1, 5 + 1))): # num > i nn.add(layers.Dropout(hp.Float("dropout", 0.0, 0.7, step=0.1))) if hp.Choice("batch_norm", [True, False]): nn.add(layers.BatchNormalization()) nn.add( layers.Dense(hp.Int("dense%i" % i, 4, 64, step=4), activation="relu") ) if hp.Choice("batch_norm_output", [True, False]): nn.add(layers.BatchNormalization()) nn.add(layers.Dense(1)) if augmented: loss = lambda y1, y2, w=None: augmented_loss( tf.keras.losses.MeanSquaredError(), y1, y2, w ) else: loss = tf.keras.losses.MeanSquaredError() nn.compile( optimizer=tf.keras.optimizers.Adam( hp.Float("learning_rate", 1e-4, 0.3, sampling="log") ), loss=loss, metrics=["RootMeanSquaredError"], ) return nn best_hyperparameters_emb_lstm_dict = { "lstm_blocks": 1, "lstm_units0": 66, "dropout": 0.2, "dense_blocks": 3, "batch_norm": 1, "dense0": 20, "batch_norm_output": 1, "learning_rate": 0.0022843219066342054, "dense1": 60, "dense2": 56, "tuner/epochs": 43, "tuner/initial_epoch": 0, "tuner/bracket": 1, "tuner/round": 0, } # fit the model X_train, X_test, y_train, y_test, pca = split_data( data_train_val, target="target", indices=indices, pca=False ) nn_inputs_emb = nn_preprocess_emb(data_train_val) best_model_emb_lstm = fit_from_hp_dict( build_lstm_emb, best_hyperparameters_emb_lstm_dict, nn_inputs_emb, indices, y_train, y_test, ) data_train_val["lstm_word_embeddings"] = best_model_emb_lstm.predict(nn_inputs_emb) del X_train, X_test, y_train, y_test, pca, nn_inputs_emb # apply it to the test data nn_inputs_emb = nn_preprocess_emb(data_test) data_test["lstm_word_embeddings"] = best_model_emb_lstm.predict(nn_inputs_emb) del nn_inputs_emb # the final regressor scalers = { "QuantileTransf": QuantileTransformer(), } sgdr_best_hps = { "loss": "squared_epsilon_insensitive", "penalty": "elasticnet", "l1_ratio": 0.0, "alpha": 0.1, "epsilon": 0.001, "learning_rate": "invscaling", "eta0": 0.01, "power_t": 0.25, } sgdr_best_pca = False sgdr_best_scaler = "QuantileTransf" # first let's see how it does def try_SGDR_opt(data, indices=None, output=False): X_train, X_test, y_train, y_test, pca = split_data( data, pca=sgdr_best_pca, indices=indices, augmented=False ) reg = make_pipeline( scalers[sgdr_best_scaler], SGDRegressor(**sgdr_best_hps, random_state=random_state), ) reg.fit(X_train, y_train) print("RMSE: ", mean_squared_error(y_test, reg.predict(X_test), squared=False)) if output: return reg print("rmse from validation:") try_SGDR_opt(data_train_val, indices=indices) # now retrain on full training data X = data_train_val[data_train_val.columns.difference(noTrain_columns)] y = data_train_val["target"] reg = make_pipeline( scalers[sgdr_best_scaler], SGDRegressor(**sgdr_best_hps, random_state=random_state) ) reg.fit(X, y) print("rmse from full data:") print("RMSE: ", mean_squared_error(y, reg.predict(X), squared=False)) del X, y # finally, generate predictions for the test dataset X = data_test[data_test.columns.difference(noTrain_columns)] data_test["target"] = reg.predict(X) 
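# Optional sanity check (a minimal sketch, not part of the original pipeline): because the final
# model is a QuantileTransformer + SGDRegressor pipeline built with make_pipeline, the fitted
# coefficients can be lined up with the stacked feature columns to see which base-model
# predictions the ensemble leans on most. The coefficients refer to the quantile-transformed
# features, and "sgdregressor" is the lower-cased step name that make_pipeline assigns.
coef_by_feature = pd.Series(reg.named_steps["sgdregressor"].coef_, index=X.columns)
print(coef_by_feature.sort_values(key=abs, ascending=False).head(10))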
# save the submission
data_test[["id", "target"]].to_csv("submission.csv", index=False)
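# Appendix (illustrative only): fit_from_hp_dict is defined earlier in the notebook and is not
# shown in this excerpt. A helper like it can rebuild a tuned model from a saved hyperparameter
# dict by registering every stored value as a Fixed hyperparameter, skipping the "tuner/..."
# bookkeeping keys, and then handing the HyperParameters object back to the same build function
# used for the search. The keras_tuner import name and this exact approach are assumptions, not
# a copy of the original helper.
import keras_tuner as kt


def rebuild_from_hp_dict(build_fn, hp_dict, augmented=True):
    hp = kt.HyperParameters()
    for name, value in hp_dict.items():
        if not name.startswith("tuner/"):
            hp.Fixed(name, value)  # later hp.Int/hp.Choice calls with the same name reuse this value
    return build_fn(hp, augmented=augmented)


# e.g. model = rebuild_from_hp_dict(build_cnn_len_freq, best_hyperparameters_len_freq_dict)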
false
1
8,510
0
8,745
8,510
69549127
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session train_data = pd.read_csv("/kaggle/input/indian-servers-v3/train_data.csv") train_data.head(10) train_data.isnull().any() train_data.isnull().sum() train_data.info() train_data.describe() train_data.corr() import seaborn as sns sns.heatmap(train_data.corr(), annot=True, vmin=-1, vmax=1, center=0) x = train_data.drop(["id", "price_range"], axis="columns") test_data = pd.read_csv("/kaggle/input/indian-servers-v3/test_data.csv") test_d = test_data.drop("id", axis="columns") test_data.head(10) y = train_data.price_range from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier() from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.06) model.fit(x_train, y_train) model.score(x_test, y_test) test_data["price_range"] = model.predict(test_d) result = pd.concat([test_data.id, test_data.price_range], axis="columns") result.to_csv("week4_result.csv", index=False)
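# A quick robustness check (illustrative addition, not in the original notebook): with
# test_size=0.06 the hold-out set is tiny, so the single accuracy score above is noisy.
# K-fold cross-validation on the same features gives a steadier estimate.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(RandomForestClassifier(random_state=0), x, y, cv=5)
print("CV accuracy: %.3f +/- %.3f" % (cv_scores.mean(), cv_scores.std()))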
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/549/69549127.ipynb
null
null
[{"Id": 69549127, "ScriptId": 18992244, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8029406, "CreationDate": "08/01/2021 11:48:12", "VersionNumber": 1.0, "Title": "notebook56a207e563", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 56.0, "LinesInsertedFromPrevious": 56.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session train_data = pd.read_csv("/kaggle/input/indian-servers-v3/train_data.csv") train_data.head(10) train_data.isnull().any() train_data.isnull().sum() train_data.info() train_data.describe() train_data.corr() import seaborn as sns sns.heatmap(train_data.corr(), annot=True, vmin=-1, vmax=1, center=0) x = train_data.drop(["id", "price_range"], axis="columns") test_data = pd.read_csv("/kaggle/input/indian-servers-v3/test_data.csv") test_d = test_data.drop("id", axis="columns") test_data.head(10) y = train_data.price_range from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier() from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.06) model.fit(x_train, y_train) model.score(x_test, y_test) test_data["price_range"] = model.predict(test_d) result = pd.concat([test_data.id, test_data.price_range], axis="columns") result.to_csv("week4_result.csv", index=False)
false
0
534
0
534
534
69549638
<jupyter_start><jupyter_text>Water Quality # Context `Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.` # Content The water_potability.csv file contains water quality metrics for 3276 different water bodies. ### 1. pH value: ```PH is an important parameter in evaluating the acid–base balance of water. It is also the indicator of acidic or alkaline condition of water status. WHO has recommended maximum permissible limit of pH from 6.5 to 8.5. The current investigation ranges were 6.52–6.83 which are in the range of WHO standards. ``` ### 2. Hardness: ```Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water. Hardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.``` ### 3. Solids (Total dissolved solids - TDS): ```Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates etc. These minerals produced un-wanted taste and diluted color in appearance of water. This is the important parameter for the use of water. The water with high TDS value indicates that water is highly mineralized. Desirable limit for TDS is 500 mg/l and maximum limit is 1000 mg/l which prescribed for drinking purpose. ``` ### 4. Chloramines: ```Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.``` ### 5. Sulfate: ```Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations. ``` ### 6. Conductivity: ```Pure water is not a good conductor of electric current rather’s a good insulator. Increase in ions concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, EC value should not exceeded 400 μS/cm. ``` ### 7. Organic_carbon: ```Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to US EPA &lt; 2 mg/L as TOC in treated / drinking water, and &lt; 4 mg/Lit in source water which is use for treatment.``` ### 8. 
Trihalomethanes: ```THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.``` ### 9. Turbidity: ```The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of light emitting properties of water and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.``` ### 10. Potability: ```Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.``` Kaggle dataset identifier: water-potability <jupyter_code>import pandas as pd df = pd.read_csv('water-potability/water_potability.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 3276 entries, 0 to 3275 Data columns (total 10 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 ph 2785 non-null float64 1 Hardness 3276 non-null float64 2 Solids 3276 non-null float64 3 Chloramines 3276 non-null float64 4 Sulfate 2495 non-null float64 5 Conductivity 3276 non-null float64 6 Organic_carbon 3276 non-null float64 7 Trihalomethanes 3114 non-null float64 8 Turbidity 3276 non-null float64 9 Potability 3276 non-null int64 dtypes: float64(9), int64(1) memory usage: 256.1 KB <jupyter_text>Examples: { "ph": NaN, "Hardness": 204.8904554713, "Solids": 20791.318980747, "Chloramines": 7.3002118732, "Sulfate": 368.5164413498, "Conductivity": 564.3086541722, "Organic_carbon": 10.379783078100001, "Trihalomethanes": 86.9909704615, "Turbidity": 2.9631353806, "Potability": 0.0 } { "ph": 3.7160800754, "Hardness": 129.4229205149, "Solids": 18630.0578579703, "Chloramines": 6.6352458839, "Sulfate": NaN, "Conductivity": 592.8853591349, "Organic_carbon": 15.1800131164, "Trihalomethanes": 56.3290762845, "Turbidity": 4.5006562749, "Potability": 0.0 } { "ph": 8.0991241893, "Hardness": 224.2362593936, "Solids": 19909.5417322924, "Chloramines": 9.2758836027, "Sulfate": NaN, "Conductivity": 418.6062130645, "Organic_carbon": 16.8686369296, "Trihalomethanes": 66.4200925118, "Turbidity": 3.0559337497, "Potability": 0.0 } { "ph": 8.3167658842, "Hardness": 214.3733940856, "Solids": 22018.4174407753, "Chloramines": 8.0593323774, "Sulfate": 356.8861356431, "Conductivity": 363.2665161642, "Organic_carbon": 18.4365244955, "Trihalomethanes": 100.3416743651, "Turbidity": 4.6287705368, "Potability": 0.0 } <jupyter_script># # What is Potabible water # At its most basic level, potabible water relates to the safety of water. # Many questions begin to emerge. # * Are we able to consume all fresh water types? # * What percentage of the worlds fresh water can be accessed? # * Has the water table increased as sea levels have rised? 
import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # # EDA # Import the dataset for review as a DataFrame df = pd.read_csv("../input/water-potability/water_potability.csv") # Review the first 5 observations df.head() # Display information about the DataFrame df.info(memory_usage="deep") # Shape of the dataframe print(df.shape) # Find the number of rows within a dataframe print(len(df)) # Extracting information from the shape tuple print(f"Number of rows: {df.shape[0]} \nNumber of columns: {df.shape[1]}") # ### 1a. Summary statistics # Review the high level summary details for each variable df.describe() # ### 1b. Missing values # Check for the missing values by columns df.isnull().sum() # Proportion of missing values by column def isnull_prop(df): total_rows = df.shape[0] missing_val_dict = {} for col in df.columns: missing_val_dict[col] = [ df[col].isnull().sum(), (df[col].isnull().sum() / total_rows), ] return missing_val_dict # Apply the missing value method null_dict = isnull_prop(df) print(null_dict.items()) # Create a dataframe of the missing value information df_missing = pd.DataFrame.from_dict( null_dict, orient="index", columns=["missing", "miss_percent"] ) df_missing # Display missing values using a heatmap to understand if any patterns are present plt.figure(figsize=(15, 8)) sns.heatmap(df.isnull()) # set the histogram, mean and median sns.displot(df["ph"], kde=False) plt.axvline(x=df.ph.mean(), linewidth=3, color="g", label="mean", alpha=0.5) plt.axvline(x=df.ph.median(), linewidth=3, color="y", label="median", alpha=0.5) # set title, legends and labels plt.xlabel("ph") plt.ylabel("Count") plt.title("Distribution of ph", size=14) plt.legend(["mean", "median"]) print( f"Mean pH value {df.ph.mean()} \n Median pH value {df.ph.median()} \n Min pH value {df.ph.min()} \n Max pH value {df.ph.max()}" ) # Do these values of pH relate to actual water or are there a wider range of sources being supplied? 
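# A small follow-up check (illustrative, not in the original notebook): the dataset description
# cites a WHO guideline range of 6.5-8.5 for drinking-water pH, so it is worth counting how many
# of the non-missing readings fall outside that window before imputing anything.
outside_who_range = df[(df["ph"] < 6.5) | (df["ph"] > 8.5)]["ph"]
print(
    f"pH readings outside 6.5-8.5: {len(outside_who_range)} of {df['ph'].notnull().sum()} non-missing values"
)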
# ![pH scale](https://www.scienceabc.com/wp-content/uploads/2019/07/A-pH-scale-on-white-background-illustration-VectorBlueRingMedias.jpg) # # Predict Potability # Preprocessing from sklearn.preprocessing import scale from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.feature_selection import RFE # Classifiers from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier # Hyperparameter tuning from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint # Performance metrics from sklearn.model_selection import cross_val_score from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import roc_auc_score # Apply mean value to the missing values df["ph"].fillna(df["ph"].mean(), inplace=True) df["Sulfate"].fillna(df["Sulfate"].mean(), inplace=True) df["Trihalomethanes"].fillna(df["Trihalomethanes"].mean(), inplace=True) df.isnull().sum() # Separate into X and y variables X = df.drop(["Potability"], axis=1) y = df["Potability"].values # Display the features X.head() # Does scaling the features change the dynamics X_scaled = scale(X) # Print the mean and standard deviation of the unscaled features print("Mean of Unscaled Features: {}".format(np.mean(X))) print("Standard Deviation of Unscaled Features: {}".format(np.std(X))) # Print the mean and standard deviation of the scaled features print("Mean of Scaled Features: {}".format(np.mean(X_scaled))) print("Standard Deviation of Scaled Features: {}".format(np.std(X_scaled))) # k-NN classifier # Split into training and test set X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.4, random_state=2, stratify=y ) # Create a k-NN classifier with 7 neighbors knn = KNeighborsClassifier(n_neighbors=7) # Fit the classifier to the training data knn.fit(X_train, y_train) # Print the accuracy print(knn.score(X_test, y_test)) # Lets understand the performance of the k-NN classifer across a range of clusters # Setup arrays to store train and test accuracies neighbors = np.arange(1, 12) train_accuracy = np.empty(len(neighbors)) test_accuracy = np.empty(len(neighbors)) # Loop over different values of k for i, k in enumerate(neighbors): # Setup a k-NN Classifier with k neighbors knn = KNeighborsClassifier(n_neighbors=k) # Fit the classifier to the training data knn.fit(X_train, y_train) # Compute accuracy on the training set train_accuracy[i] = knn.score(X_train, y_train) # Compute accuracy on the testing set test_accuracy[i] = knn.score(X_test, y_test) # Generate plot plt.title("k-NN: Varying Number of Neighbors") plt.plot(neighbors, test_accuracy, label="Testing Accuracy") plt.plot(neighbors, train_accuracy, label="Training Accuracy") plt.legend() plt.xlabel("Number of Neighbors") plt.ylabel("Accuracy") plt.show() # Setup the pipeline steps steps = [("scaler", StandardScaler()), ("knn", KNeighborsClassifier())] # Create the pipeline pipeline = Pipeline(steps) # Fit the pipeline to the training set knn_scaled = pipeline.fit(X_train, y_train) # Instantiate and fit a k-NN classifier to the unscaled data knn_unscaled = KNeighborsClassifier().fit(X_train, y_train) # Compute and print metrics print("Accuracy with Scaling: {}".format(knn_scaled.score(X_test, y_test))) 
print("Accuracy without Scaling: {}".format(knn_unscaled.score(X_test, y_test))) # Decision Tree classifier # Setup the parameters and distributions to sample param_dist = { "max_depth": [3, None], "max_features": randint(1, 9), "min_samples_leaf": randint(1, 9), "criterion": ["gini", "entropy"], } # Instantiate a Decision Tree classifier tree = DecisionTreeClassifier() # Instantiate the RandomizedSearchCV object tree_cv = RandomizedSearchCV(tree, param_dist, cv=5) # Fit it to the data tree_cv.fit(X, y) # Print the tuned parameters and score print("Tuned Decision Tree Parameters: {}".format(tree_cv.best_params_)) print("Best score is {}".format(tree_cv.best_score_)) class ModelBuild: # Constructor def __init__( self, X, y, model=DecisionTreeClassifier(criterion="gini", max_depth=3, min_samples_leaf=8), ): self.X = X self.y = y self.model = model # Method to perform the train test split def _train_test_split(self): X_train, X_test, y_train, y_test = train_test_split( self.X, self.y, test_size=0.3, random_state=42 ) return X_train, X_test, y_train, y_test # Method to set the pipeline def _pipeline(self): steps = [("scaler", StandardScaler()), ("model_name", self.model)] return Pipeline(steps) # Method to run all steps def model_build(self): if __name__ == "__main__": X_train, X_test, y_train, y_test = self._train_test_split() pipeline = self._pipeline() fit = pipeline.fit(X_train, y_train) return print("Accuracy: {}".format(pipeline.score(X_test, y_test))) ModelBuild(X, y).model_build() class FeatureSelection(ModelBuild): # Inherit the ModelBuild features def __init__(self, X, y, model=RandomForestClassifier()): super().__init__(X, y, model=RandomForestClassifier()) self.X = X self.y = y self.model = model # Method to evaluate list of models def rfe_model(self): model_dict = dict() for i in range(2, len(self.X.columns)): rfe = RFE(estimator=self.model, n_features_to_select=i) model = DecisionTreeClassifier() model_dict[str(i)] = Pipeline(steps=[("rfe", rfe), ("mod", model)]) return model_dict # Method to evaluate the models def eval_model(self, model): cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=6) scores = cross_val_score( model, self.X, self.y, scoring="accuracy", cv=cv, n_jobs=-1, error_score="raise", ) return scores # Lets understand the features being selected def feature_select(self, n_feature): rfe = RFE(estimator=self.model, n_features_to_select=n_feature) rfe.fit(self.X, self.y) # for i in range(X.shape[1]): for i, col in enumerate(X.columns): print( "Column: %s, Selected %s, Rank: %.3f" % (col, rfe.support_[i], rfe.ranking_[i]) ) # Method to run all steps def feature_selection(self): if __name__ == "__main__": models = self.rfe_model() results, names = list(), list() for name, model in models.items(): scores = self.eval_model(model) results.append(scores) names.append(name) print( f"{name}, mean_score: {np.mean(scores)}, std_score: {np.std(scores)}" ) box_plt = plt.boxplot(results, labels=names, showmeans=True) return box_plt box = FeatureSelection( X, y, model=DecisionTreeClassifier(criterion="gini", max_depth=3, min_samples_leaf=8), ).feature_selection() plt.show() features = FeatureSelection( X, y, model=DecisionTreeClassifier(criterion="gini", max_depth=3, min_samples_leaf=8), ).feature_select(5) # Lets try a Light GBM from lightgbm import LGBMClassifier # Split into training and test set X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.4, random_state=2, stratify=y ) # Instantiate the LGBM lgbm = LGBMClassifier() # Fit the 
classifier to the training data lgbm.fit(X_train, y_train) # Perform prediction y_pred = lgbm.predict(X_test) # Print the accuracy print(lgbm.score(X_test, y_test)) print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Lets understand the baseline params lgbm.get_params() # Setup the pipeline steps = [("scaler", StandardScaler()), ("lgbm", LGBMClassifier())] pipeline = Pipeline(steps) # Specify the hyperparameter space parameters = { "lgbm__learning_rate": [0.03, 0.05, 0.1], "lgbm__objective": ["binary"], "lgbm__metric": ["binary_logloss"], "lgbm__max_depth": [10], "lgbm__n_estimators": [100, 200, 300], } # Create train and test sets X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) # Instantiate the GridSearchCV object cv = GridSearchCV(pipeline, parameters, cv=3) # Fit to the training set cv.fit(X_train, y_train) # Predict the labels of the test set y_pred = cv.predict(X_test) # Display best score and params print(f"Best score : {cv.best_score_}") print(f"Best params : {cv.best_params_}") # Compute and print metrics print("Accuracy: {}".format(cv.score(X_test, y_test))) print(classification_report(y_test, y_pred))
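# Follow-up (illustrative): roc_auc_score is imported above but never used. Since LGBMClassifier
# supports predict_proba (and GridSearchCV/Pipeline pass it through), the tuned model's ranking
# quality can be summarised with ROC AUC alongside accuracy.
y_proba = cv.predict_proba(X_test)[:, 1]
print("ROC AUC: {}".format(roc_auc_score(y_test, y_proba)))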
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/549/69549638.ipynb
water-potability
adityakadiwal
[{"Id": 69549638, "ScriptId": 18933145, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 344436, "CreationDate": "08/01/2021 11:55:50", "VersionNumber": 2.0, "Title": "Water quality EDA and Potability analysis", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 351.0, "LinesInsertedFromPrevious": 14.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 337.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 21}]
[{"Id": 92864688, "KernelVersionId": 69549638, "SourceDatasetVersionId": 2157486}]
[{"Id": 2157486, "DatasetId": 1292407, "DatasourceVersionId": 2198621, "CreatorUserId": 5454565, "LicenseName": "CC0: Public Domain", "CreationDate": "04/25/2021 10:27:44", "VersionNumber": 3.0, "Title": "Water Quality", "Slug": "water-potability", "Subtitle": "Drinking water potability", "Description": "# Context\n\n`Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.`\n\n\n# Content\n\n\nThe water_potability.csv file contains water quality metrics for 3276 different water bodies. \n### 1. pH value:\n```PH is an important parameter in evaluating the acid\u2013base balance of water. It is also the indicator of acidic or alkaline condition of water status. WHO has recommended maximum permissible limit of pH from 6.5 to 8.5. The current investigation ranges were 6.52\u20136.83 which are in the range of WHO standards. ```\n\n### 2. Hardness:\n```Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water.\nHardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.```\n\n### 3. Solids (Total dissolved solids - TDS): \n```Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates etc. These minerals produced un-wanted taste and diluted color in appearance of water. This is the important parameter for the use of water. The water with high TDS value indicates that water is highly mineralized. Desirable limit for TDS is 500 mg/l and maximum limit is 1000 mg/l which prescribed for drinking purpose. ```\n\n### 4. Chloramines: \n```Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.```\n\n### 5. Sulfate: \n```Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations. ```\n\n### 6. Conductivity: \n```Pure water is not a good conductor of electric current rather\u2019s a good insulator. Increase in ions concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, EC value should not exceeded 400 \u03bcS/cm. ```\n\n### 7. 
Organic_carbon: \n ```Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to US EPA &lt; 2 mg/L as TOC in treated / drinking water, and &lt; 4 mg/Lit in source water which is use for treatment.```\n\n### 8. Trihalomethanes: \n```THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.```\n\n### 9. Turbidity: \n```The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of light emitting properties of water and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.```\n\n### 10. Potability: \n```Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.```", "VersionNotes": "Removed garbage column", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1292407, "CreatorUserId": 5454565, "OwnerUserId": 5454565.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2157486.0, "CurrentDatasourceVersionId": 2198621.0, "ForumId": 1311077, "Type": 2, "CreationDate": "04/24/2021 07:18:57", "LastActivityDate": "04/24/2021", "TotalViews": 422520, "TotalDownloads": 61531, "TotalVotes": 1262, "TotalKernels": 437}]
[{"Id": 5454565, "UserName": "adityakadiwal", "DisplayName": "Aditya Kadiwal", "RegisterDate": "07/12/2020", "PerformanceTier": 2}]
# # What is Potabible water # At its most basic level, potabible water relates to the safety of water. # Many questions begin to emerge. # * Are we able to consume all fresh water types? # * What percentage of the worlds fresh water can be accessed? # * Has the water table increased as sea levels have rised? import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # # EDA # Import the dataset for review as a DataFrame df = pd.read_csv("../input/water-potability/water_potability.csv") # Review the first 5 observations df.head() # Display information about the DataFrame df.info(memory_usage="deep") # Shape of the dataframe print(df.shape) # Find the number of rows within a dataframe print(len(df)) # Extracting information from the shape tuple print(f"Number of rows: {df.shape[0]} \nNumber of columns: {df.shape[1]}") # ### 1a. Summary statistics # Review the high level summary details for each variable df.describe() # ### 1b. Missing values # Check for the missing values by columns df.isnull().sum() # Proportion of missing values by column def isnull_prop(df): total_rows = df.shape[0] missing_val_dict = {} for col in df.columns: missing_val_dict[col] = [ df[col].isnull().sum(), (df[col].isnull().sum() / total_rows), ] return missing_val_dict # Apply the missing value method null_dict = isnull_prop(df) print(null_dict.items()) # Create a dataframe of the missing value information df_missing = pd.DataFrame.from_dict( null_dict, orient="index", columns=["missing", "miss_percent"] ) df_missing # Display missing values using a heatmap to understand if any patterns are present plt.figure(figsize=(15, 8)) sns.heatmap(df.isnull()) # set the histogram, mean and median sns.displot(df["ph"], kde=False) plt.axvline(x=df.ph.mean(), linewidth=3, color="g", label="mean", alpha=0.5) plt.axvline(x=df.ph.median(), linewidth=3, color="y", label="median", alpha=0.5) # set title, legends and labels plt.xlabel("ph") plt.ylabel("Count") plt.title("Distribution of ph", size=14) plt.legend(["mean", "median"]) print( f"Mean pH value {df.ph.mean()} \n Median pH value {df.ph.median()} \n Min pH value {df.ph.min()} \n Max pH value {df.ph.max()}" ) # Do these values of pH relate to actual water or are there a wider range of sources being supplied? 
# ![pH scale](https://www.scienceabc.com/wp-content/uploads/2019/07/A-pH-scale-on-white-background-illustration-VectorBlueRingMedias.jpg) # # Predict Potability # Preprocessing from sklearn.preprocessing import scale from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.feature_selection import RFE # Classifiers from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier # Hyperparameter tuning from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint # Performance metrics from sklearn.model_selection import cross_val_score from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import roc_auc_score # Apply mean value to the missing values df["ph"].fillna(df["ph"].mean(), inplace=True) df["Sulfate"].fillna(df["Sulfate"].mean(), inplace=True) df["Trihalomethanes"].fillna(df["Trihalomethanes"].mean(), inplace=True) df.isnull().sum() # Separate into X and y variables X = df.drop(["Potability"], axis=1) y = df["Potability"].values # Display the features X.head() # Does scaling the features change the dynamics X_scaled = scale(X) # Print the mean and standard deviation of the unscaled features print("Mean of Unscaled Features: {}".format(np.mean(X))) print("Standard Deviation of Unscaled Features: {}".format(np.std(X))) # Print the mean and standard deviation of the scaled features print("Mean of Scaled Features: {}".format(np.mean(X_scaled))) print("Standard Deviation of Scaled Features: {}".format(np.std(X_scaled))) # k-NN classifier # Split into training and test set X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.4, random_state=2, stratify=y ) # Create a k-NN classifier with 7 neighbors knn = KNeighborsClassifier(n_neighbors=7) # Fit the classifier to the training data knn.fit(X_train, y_train) # Print the accuracy print(knn.score(X_test, y_test)) # Lets understand the performance of the k-NN classifer across a range of clusters # Setup arrays to store train and test accuracies neighbors = np.arange(1, 12) train_accuracy = np.empty(len(neighbors)) test_accuracy = np.empty(len(neighbors)) # Loop over different values of k for i, k in enumerate(neighbors): # Setup a k-NN Classifier with k neighbors knn = KNeighborsClassifier(n_neighbors=k) # Fit the classifier to the training data knn.fit(X_train, y_train) # Compute accuracy on the training set train_accuracy[i] = knn.score(X_train, y_train) # Compute accuracy on the testing set test_accuracy[i] = knn.score(X_test, y_test) # Generate plot plt.title("k-NN: Varying Number of Neighbors") plt.plot(neighbors, test_accuracy, label="Testing Accuracy") plt.plot(neighbors, train_accuracy, label="Training Accuracy") plt.legend() plt.xlabel("Number of Neighbors") plt.ylabel("Accuracy") plt.show() # Setup the pipeline steps steps = [("scaler", StandardScaler()), ("knn", KNeighborsClassifier())] # Create the pipeline pipeline = Pipeline(steps) # Fit the pipeline to the training set knn_scaled = pipeline.fit(X_train, y_train) # Instantiate and fit a k-NN classifier to the unscaled data knn_unscaled = KNeighborsClassifier().fit(X_train, y_train) # Compute and print metrics print("Accuracy with Scaling: {}".format(knn_scaled.score(X_test, y_test))) 
print("Accuracy without Scaling: {}".format(knn_unscaled.score(X_test, y_test))) # Decision Tree classifier # Setup the parameters and distributions to sample param_dist = { "max_depth": [3, None], "max_features": randint(1, 9), "min_samples_leaf": randint(1, 9), "criterion": ["gini", "entropy"], } # Instantiate a Decision Tree classifier tree = DecisionTreeClassifier() # Instantiate the RandomizedSearchCV object tree_cv = RandomizedSearchCV(tree, param_dist, cv=5) # Fit it to the data tree_cv.fit(X, y) # Print the tuned parameters and score print("Tuned Decision Tree Parameters: {}".format(tree_cv.best_params_)) print("Best score is {}".format(tree_cv.best_score_)) class ModelBuild: # Constructor def __init__( self, X, y, model=DecisionTreeClassifier(criterion="gini", max_depth=3, min_samples_leaf=8), ): self.X = X self.y = y self.model = model # Method to perform the train test split def _train_test_split(self): X_train, X_test, y_train, y_test = train_test_split( self.X, self.y, test_size=0.3, random_state=42 ) return X_train, X_test, y_train, y_test # Method to set the pipeline def _pipeline(self): steps = [("scaler", StandardScaler()), ("model_name", self.model)] return Pipeline(steps) # Method to run all steps def model_build(self): if __name__ == "__main__": X_train, X_test, y_train, y_test = self._train_test_split() pipeline = self._pipeline() fit = pipeline.fit(X_train, y_train) return print("Accuracy: {}".format(pipeline.score(X_test, y_test))) ModelBuild(X, y).model_build() class FeatureSelection(ModelBuild): # Inherit the ModelBuild features def __init__(self, X, y, model=RandomForestClassifier()): super().__init__(X, y, model=RandomForestClassifier()) self.X = X self.y = y self.model = model # Method to evaluate list of models def rfe_model(self): model_dict = dict() for i in range(2, len(self.X.columns)): rfe = RFE(estimator=self.model, n_features_to_select=i) model = DecisionTreeClassifier() model_dict[str(i)] = Pipeline(steps=[("rfe", rfe), ("mod", model)]) return model_dict # Method to evaluate the models def eval_model(self, model): cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=6) scores = cross_val_score( model, self.X, self.y, scoring="accuracy", cv=cv, n_jobs=-1, error_score="raise", ) return scores # Lets understand the features being selected def feature_select(self, n_feature): rfe = RFE(estimator=self.model, n_features_to_select=n_feature) rfe.fit(self.X, self.y) # for i in range(X.shape[1]): for i, col in enumerate(X.columns): print( "Column: %s, Selected %s, Rank: %.3f" % (col, rfe.support_[i], rfe.ranking_[i]) ) # Method to run all steps def feature_selection(self): if __name__ == "__main__": models = self.rfe_model() results, names = list(), list() for name, model in models.items(): scores = self.eval_model(model) results.append(scores) names.append(name) print( f"{name}, mean_score: {np.mean(scores)}, std_score: {np.std(scores)}" ) box_plt = plt.boxplot(results, labels=names, showmeans=True) return box_plt box = FeatureSelection( X, y, model=DecisionTreeClassifier(criterion="gini", max_depth=3, min_samples_leaf=8), ).feature_selection() plt.show() features = FeatureSelection( X, y, model=DecisionTreeClassifier(criterion="gini", max_depth=3, min_samples_leaf=8), ).feature_select(5) # Lets try a Light GBM from lightgbm import LGBMClassifier # Split into training and test set X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.4, random_state=2, stratify=y ) # Instantiate the LGBM lgbm = LGBMClassifier() # Fit the 
classifier to the training data lgbm.fit(X_train, y_train) # Perform prediction y_pred = lgbm.predict(X_test) # Print the accuracy print(lgbm.score(X_test, y_test)) print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Lets understand the baseline params lgbm.get_params() # Setup the pipeline steps = [("scaler", StandardScaler()), ("lgbm", LGBMClassifier())] pipeline = Pipeline(steps) # Specify the hyperparameter space parameters = { "lgbm__learning_rate": [0.03, 0.05, 0.1], "lgbm__objective": ["binary"], "lgbm__metric": ["binary_logloss"], "lgbm__max_depth": [10], "lgbm__n_estimators": [100, 200, 300], } # Create train and test sets X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) # Instantiate the GridSearchCV object cv = GridSearchCV(pipeline, parameters, cv=3) # Fit to the training set cv.fit(X_train, y_train) # Predict the labels of the test set y_pred = cv.predict(X_test) # Display best score and params print(f"Best score : {cv.best_score_}") print(f"Best params : {cv.best_params_}") # Compute and print metrics print("Accuracy: {}".format(cv.score(X_test, y_test))) print(classification_report(y_test, y_pred))
[{"water-potability/water_potability.csv": {"column_names": "[\"ph\", \"Hardness\", \"Solids\", \"Chloramines\", \"Sulfate\", \"Conductivity\", \"Organic_carbon\", \"Trihalomethanes\", \"Turbidity\", \"Potability\"]", "column_data_types": "{\"ph\": \"float64\", \"Hardness\": \"float64\", \"Solids\": \"float64\", \"Chloramines\": \"float64\", \"Sulfate\": \"float64\", \"Conductivity\": \"float64\", \"Organic_carbon\": \"float64\", \"Trihalomethanes\": \"float64\", \"Turbidity\": \"float64\", \"Potability\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3276 entries, 0 to 3275\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ph 2785 non-null float64\n 1 Hardness 3276 non-null float64\n 2 Solids 3276 non-null float64\n 3 Chloramines 3276 non-null float64\n 4 Sulfate 2495 non-null float64\n 5 Conductivity 3276 non-null float64\n 6 Organic_carbon 3276 non-null float64\n 7 Trihalomethanes 3114 non-null float64\n 8 Turbidity 3276 non-null float64\n 9 Potability 3276 non-null int64 \ndtypes: float64(9), int64(1)\nmemory usage: 256.1 KB\n", "summary": "{\"ph\": {\"count\": 2785.0, \"mean\": 7.080794504276835, \"std\": 1.5943195187088104, \"min\": 0.0, \"25%\": 6.09309191422186, \"50%\": 7.036752103833548, \"75%\": 8.06206612314847, \"max\": 13.999999999999998}, \"Hardness\": {\"count\": 3276.0, \"mean\": 196.36949601730151, \"std\": 32.879761476294156, \"min\": 47.432, \"25%\": 176.85053787752437, \"50%\": 196.96762686363076, \"75%\": 216.66745621487073, \"max\": 323.124}, \"Solids\": {\"count\": 3276.0, \"mean\": 22014.092526077104, \"std\": 8768.570827785927, \"min\": 320.942611274359, \"25%\": 15666.69029696465, \"50%\": 20927.833606520187, \"75%\": 27332.762127438615, \"max\": 61227.19600771213}, \"Chloramines\": {\"count\": 3276.0, \"mean\": 7.122276793425786, \"std\": 1.5830848890397096, \"min\": 0.3520000000000003, \"25%\": 6.1274207554913, \"50%\": 7.130298973883081, \"75%\": 8.114887032109028, \"max\": 13.127000000000002}, \"Sulfate\": {\"count\": 2495.0, \"mean\": 333.7757766108135, \"std\": 41.416840461672706, \"min\": 129.00000000000003, \"25%\": 307.69949783471964, \"50%\": 333.073545745888, \"75%\": 359.9501703847443, \"max\": 481.0306423059972}, \"Conductivity\": {\"count\": 3276.0, \"mean\": 426.20511068255325, \"std\": 80.8240640511118, \"min\": 181.483753985146, \"25%\": 365.7344141184627, \"50%\": 421.8849682800544, \"75%\": 481.7923044877282, \"max\": 753.3426195583046}, \"Organic_carbon\": {\"count\": 3276.0, \"mean\": 14.284970247677318, \"std\": 3.308161999126874, \"min\": 2.1999999999999886, \"25%\": 12.065801333613067, \"50%\": 14.218337937208588, \"75%\": 16.557651543843434, \"max\": 28.30000000000001}, \"Trihalomethanes\": {\"count\": 3114.0, \"mean\": 66.39629294676803, \"std\": 16.175008422218657, \"min\": 0.7379999999999995, \"25%\": 55.844535620979954, \"50%\": 66.62248509808484, \"75%\": 77.33747290873062, \"max\": 124.0}, \"Turbidity\": {\"count\": 3276.0, \"mean\": 3.966786169791058, \"std\": 0.7803824084854124, \"min\": 1.45, \"25%\": 3.439710869612912, \"50%\": 3.955027562993039, \"75%\": 4.50031978728511, \"max\": 6.739}, \"Potability\": {\"count\": 3276.0, \"mean\": 0.3901098901098901, \"std\": 0.48784916967025516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": 
"{\"ph\":{\"0\":null,\"1\":3.7160800754,\"2\":8.0991241893,\"3\":8.3167658842},\"Hardness\":{\"0\":204.8904554713,\"1\":129.4229205149,\"2\":224.2362593936,\"3\":214.3733940856},\"Solids\":{\"0\":20791.318980747,\"1\":18630.0578579703,\"2\":19909.5417322924,\"3\":22018.4174407753},\"Chloramines\":{\"0\":7.3002118732,\"1\":6.6352458839,\"2\":9.2758836027,\"3\":8.0593323774},\"Sulfate\":{\"0\":368.5164413498,\"1\":null,\"2\":null,\"3\":356.8861356431},\"Conductivity\":{\"0\":564.3086541722,\"1\":592.8853591349,\"2\":418.6062130645,\"3\":363.2665161642},\"Organic_carbon\":{\"0\":10.3797830781,\"1\":15.1800131164,\"2\":16.8686369296,\"3\":18.4365244955},\"Trihalomethanes\":{\"0\":86.9909704615,\"1\":56.3290762845,\"2\":66.4200925118,\"3\":100.3416743651},\"Turbidity\":{\"0\":2.9631353806,\"1\":4.5006562749,\"2\":3.0559337497,\"3\":4.6287705368},\"Potability\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0}}"}}]
true
1
<start_data_description><data_path>water-potability/water_potability.csv: <column_names> ['ph', 'Hardness', 'Solids', 'Chloramines', 'Sulfate', 'Conductivity', 'Organic_carbon', 'Trihalomethanes', 'Turbidity', 'Potability'] <column_types> {'ph': 'float64', 'Hardness': 'float64', 'Solids': 'float64', 'Chloramines': 'float64', 'Sulfate': 'float64', 'Conductivity': 'float64', 'Organic_carbon': 'float64', 'Trihalomethanes': 'float64', 'Turbidity': 'float64', 'Potability': 'int64'} <dataframe_Summary> {'ph': {'count': 2785.0, 'mean': 7.080794504276835, 'std': 1.5943195187088104, 'min': 0.0, '25%': 6.09309191422186, '50%': 7.036752103833548, '75%': 8.06206612314847, 'max': 13.999999999999998}, 'Hardness': {'count': 3276.0, 'mean': 196.36949601730151, 'std': 32.879761476294156, 'min': 47.432, '25%': 176.85053787752437, '50%': 196.96762686363076, '75%': 216.66745621487073, 'max': 323.124}, 'Solids': {'count': 3276.0, 'mean': 22014.092526077104, 'std': 8768.570827785927, 'min': 320.942611274359, '25%': 15666.69029696465, '50%': 20927.833606520187, '75%': 27332.762127438615, 'max': 61227.19600771213}, 'Chloramines': {'count': 3276.0, 'mean': 7.122276793425786, 'std': 1.5830848890397096, 'min': 0.3520000000000003, '25%': 6.1274207554913, '50%': 7.130298973883081, '75%': 8.114887032109028, 'max': 13.127000000000002}, 'Sulfate': {'count': 2495.0, 'mean': 333.7757766108135, 'std': 41.416840461672706, 'min': 129.00000000000003, '25%': 307.69949783471964, '50%': 333.073545745888, '75%': 359.9501703847443, 'max': 481.0306423059972}, 'Conductivity': {'count': 3276.0, 'mean': 426.20511068255325, 'std': 80.8240640511118, 'min': 181.483753985146, '25%': 365.7344141184627, '50%': 421.8849682800544, '75%': 481.7923044877282, 'max': 753.3426195583046}, 'Organic_carbon': {'count': 3276.0, 'mean': 14.284970247677318, 'std': 3.308161999126874, 'min': 2.1999999999999886, '25%': 12.065801333613067, '50%': 14.218337937208588, '75%': 16.557651543843434, 'max': 28.30000000000001}, 'Trihalomethanes': {'count': 3114.0, 'mean': 66.39629294676803, 'std': 16.175008422218657, 'min': 0.7379999999999995, '25%': 55.844535620979954, '50%': 66.62248509808484, '75%': 77.33747290873062, 'max': 124.0}, 'Turbidity': {'count': 3276.0, 'mean': 3.966786169791058, 'std': 0.7803824084854124, 'min': 1.45, '25%': 3.439710869612912, '50%': 3.955027562993039, '75%': 4.50031978728511, 'max': 6.739}, 'Potability': {'count': 3276.0, 'mean': 0.3901098901098901, 'std': 0.48784916967025516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}} <dataframe_info> RangeIndex: 3276 entries, 0 to 3275 Data columns (total 10 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 ph 2785 non-null float64 1 Hardness 3276 non-null float64 2 Solids 3276 non-null float64 3 Chloramines 3276 non-null float64 4 Sulfate 2495 non-null float64 5 Conductivity 3276 non-null float64 6 Organic_carbon 3276 non-null float64 7 Trihalomethanes 3114 non-null float64 8 Turbidity 3276 non-null float64 9 Potability 3276 non-null int64 dtypes: float64(9), int64(1) memory usage: 256.1 KB <some_examples> {'ph': {'0': None, '1': 3.7160800754, '2': 8.0991241893, '3': 8.3167658842}, 'Hardness': {'0': 204.8904554713, '1': 129.4229205149, '2': 224.2362593936, '3': 214.3733940856}, 'Solids': {'0': 20791.318980747, '1': 18630.0578579703, '2': 19909.5417322924, '3': 22018.4174407753}, 'Chloramines': {'0': 7.3002118732, '1': 6.6352458839, '2': 9.2758836027, '3': 8.0593323774}, 'Sulfate': {'0': 368.5164413498, '1': None, '2': None, '3': 356.8861356431}, 
'Conductivity': {'0': 564.3086541722, '1': 592.8853591349, '2': 418.6062130645, '3': 363.2665161642}, 'Organic_carbon': {'0': 10.3797830781, '1': 15.1800131164, '2': 16.8686369296, '3': 18.4365244955}, 'Trihalomethanes': {'0': 86.9909704615, '1': 56.3290762845, '2': 66.4200925118, '3': 100.3416743651}, 'Turbidity': {'0': 2.9631353806, '1': 4.5006562749, '2': 3.0559337497, '3': 4.6287705368}, 'Potability': {'0': 0, '1': 0, '2': 0, '3': 0}} <end_description>
3,435
21
5,711
3,435
69549867
# # Introduction # This topic is my practices on the [Learning path of Udacity](https://classroom.udacity.com/nanodegrees/nd025-ent/parts/d52fd59b-fa33-4cd0-8d96-11e461669484/modules/07d3f5fa-3026-4414-b7a6-b0de6c628aea/lessons/2dac695b-c799-46b4-9b3b-c6ff460dbbf3/concepts/d0f0c9ed-424d-4360-aa59-811b52c54304) # The source dataset is taken from [Seattle Airbnb](https://www.kaggle.com/airbnb/seattle) and [Boston Airbnb](https://www.kaggle.com/airbnb/boston) # According to the requirement `"pose at least three questions related to business or real-world applications of how the data could be used"` of Udacity, I had figured out 5 questions in this topic # >- 1) Try to understand how much AirBNB homes are earning in certain time frames and areas/ cities. # >- 2) Examine the Total_price by top20 listing_id at each city and its reservation days. # >- 3) Try to understand if there is anything about the properties that helps you predict price, based on the 2 datasets: boston_listings and seattle_listings. # >- 4) Can you find negative and positive reviews based on text? # >- 5) Make 2 time-series model to forecasting the difference of the reservation_prices between 2 cities. # Follow that, we can understand the difference between 2 cities by looking at these question, doing EDA and also comparing the Machine Learning models. Moreover, these 5 questions is applied many problems: # >- Time-series forecasting in Question 5 # >- Regression in Question 3 # >- Ngrams-analytics (a part of NLP) in Question 4. # Name: Do Van Nhan # Account: NhanDV6 # Now, we must import the necessary libraries then load our dataset import numpy as np import pandas as pd import os import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") sea_calendar = pd.read_csv("../input/compare/seattle_calendar.csv") bos_calendar = pd.read_csv("../input/compare/boston_calendar.csv") sea_reviews = pd.read_csv("../input/compare/seattle_reviews.csv") bos_reviews = pd.read_csv("../input/compare/boston_reviews.csv") sea_listings = pd.read_csv("../input/compare/seattle_listings.csv") bos_listings = pd.read_csv("../input/compare/boston_listings.csv") # # EDA # ### Viewing size of dataset # ##### Figure out our dataset: Number of columns, number of rows and memory usages dfs = [sea_calendar, sea_reviews, sea_listings, bos_calendar, bos_reviews, bos_listings] subtit = ["Number of columns", "Number of rows", "Memory usage"] df = pd.DataFrame( { "Nb_cols": [df.shape[1] for df in dfs], "Nb_rows": [df.shape[0] for df in dfs], "Memory_usage (Mb)": [ round(df.memory_usage().sum() / 1024**2, 3) for df in dfs ], "dataset": [ "Seattle_calendar", "Seattle_reviews", "Seattle_listings", "Boston_calendar", "Boston_reviews", "Boston_listings", ], } ) # Visualize fig, ax = plt.subplots(nrows=3, ncols=1, figsize=(20, 10)) for k, c in enumerate(["c", "y", "orange"]): df.plot( x="dataset", title=subtit[k], y=list(df.columns)[k], kind="barh", color=c, ax=ax[k], ) for p in ax[k].patches: ax[k].annotate( str(p.get_width()), (p.get_width() + 1, p.get_y() + p.get_height() - 0.25) ) plt.show() # ## 1. 
Looking at the missing values def countNA_merge_cities(boston_df, seattle_df): merge_df = pd.DataFrame( { "columns": list(seattle_df.columns), "isnull_Boston": boston_df.isnull().sum().values, "isnull_Seattle": seattle_df.isnull().sum().values, } ) return merge_df def percentage_NA_merge_cities(boston_df, seattle_df): merge_df = pd.DataFrame( { "columns": list(seattle_df.columns), "isnull_Boston": 100 * boston_df.isnull().sum().values / len(boston_df), "isnull_Seattle": 100 * seattle_df.isnull().sum().values / len(seattle_df), } ).round(2) return merge_df # The first 2 groups `calendar` and `reviews` in 2 cities `Boston` and `Seattle` has the same schemas. # >> In the `calendar_df: Boston and Seattle`, they have the same 4 columns: `price`, `available`, `date` and `listing_id` # >> In the `review_df: Boston and Seattle`, they have the same 5 columns: `comments`, `reviewer_name`, `reviewer_id`, `id` and `listing_id`. seattle_df = [sea_calendar, sea_reviews] boston_df = [bos_calendar, bos_reviews] sub_title = ["calendar_df", "review_df"] fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(23, 8)) ax = ax.ravel() for k in range(2): calendar_df = countNA_merge_cities(seattle_df[k], boston_df[k]) calendar_pr = percentage_NA_merge_cities(seattle_df[k], boston_df[k]) calendar_df.plot( x="columns", kind="barh", ax=ax[2 * k], title="Count missing values in {}".format(sub_title[k]), ) for p in ax[2 * k].patches: ax[2 * k].annotate( int(p.get_width()), (p.get_width() + 1, p.get_y() + p.get_height() - 0.25) ) calendar_pr.plot( x="columns", kind="barh", ax=ax[2 * k + 1], title="Percentage of missing values in {}".format(sub_title[k]), ) for p in ax[2 * k + 1].patches: ax[2 * k + 1].annotate( str(p.get_width()), (p.get_width(), p.get_y() + p.get_height() - 0.25) ) plt.show() # #### Commnent 1. The number of missing values in `Seattle_df` is higher than in `Boston_df` # ### How about the group `listing` # There are many columns have too much missing values in the listing dataframes, noting that these datasets has only 3585 and 3884 rows, considered `Boston_listings` and `Seattle_listings` respectively. group_list = [sea_listings, bos_listings] colors = ["c", "y"] titles = ["missing values in Seattle_listing_df", "missing values in Boston_listing_df"] fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(25, 20)) for k in range(2): count_null = group_list[k].isnull().sum().sort_values(ascending=True) real_null = count_null[count_null > 0] real_null.plot(title=titles[k], kind="barh", color=colors[k], rot=45, ax=ax[k]) for p in ax[k].patches: ax[k].annotate( str(p.get_width()), (p.get_width() * 1.01 + 100, p.get_y() + p.get_height() - 0.5), color="black", bbox=dict(boxstyle="round,pad=0.3", fc="violet", alpha=0.3), horizontalalignment="right", ) plt.show() # ### Cleaning & Merging dataset to compare # #### For `reviews_group` # First, viewing our dataset! # Look at the output below, the column `daye` is denoted `object` while we expected its type must be `datetime`!!! 
print( "Seattle_reviews.csv", sea_reviews.dtypes, sea_reviews["date"].max(), sea_reviews["date"].min(), ) print( "Boston_reviews.csv", bos_reviews.dtypes, bos_reviews["date"].max(), bos_reviews["date"].min(), ) sea_reviews.head(3) # So we must # - convert the type in column `date` to `datetime` # - don't change anything in the other columns def date_trsfr_reviews_df(df): df["date"] = pd.to_datetime(df["date"]) return df reviews_df = date_trsfr_reviews_df(sea_reviews) reviews_df.head(2) # After cleaning the `reviews datasets`, then now is the time for viewing what we have in the `calendar dataframes`? print(sea_calendar.available.value_counts()) sea_calendar.head(2) # The columns `available` has 2 values is `t` (for `True`) and `f` (for `False`) # #### For the calendar_group: # >- Remove the `$` and `,` in `price` then convert the values to `float` # >- Replace `t, f` in `available` to `True` and `False` sea_calendar["city"] = "seattle" bos_calendar["city"] = "boston" def clean_price(df): df = df.dropna() df["date"] = pd.to_datetime(df["date"]) df["price"] = df.price.apply(lambda x: x.replace("$", "").replace(",", "")).astype( float ) df["available"] = df["available"].replace({"t": True, "f": False}) return df sea_calendar = clean_price(sea_calendar) bos_calendar = clean_price(bos_calendar) calendar_df = pd.concat([bos_calendar, sea_calendar]) calendar_df.head() # Finnaly, is cleaning the dataset of `listing_group`, this will take a lots of things to do # #### For the `listing_group` # We must do step-by-step the following tasks: # >- Cleaning the values in the price columns # >- Filling the missing values of columns by using its averages or mode? # >- Decided which columns to drop or keep! def get_extra_people_fee(ser): if ser["extra_people"] == "$0.00": return 0.0 else: return 1.0 def clean_listing_df(df): df["price"] = df.price.apply(lambda x: x.replace("$", "").replace(",", "")).astype( float ) df["bathrooms"] = df["bathrooms"].fillna(df["bathrooms"].mode()[0]) df["bedrooms"] = df["bedrooms"].fillna(df["bedrooms"].mode()[0]) df["beds"] = df["beds"].fillna(df["beds"].mode()[0]) df["property_type"] = df["property_type"].fillna(df["property_type"].mode()[0]) df["extra_people_fee"] = df.apply(lambda x: get_extra_people_fee(x), axis=1) fill_avg_columns = [ "host_listings_count", "square_feet", "review_scores_rating", "review_scores_accuracy", "review_scores_cleanliness", "review_scores_checkin", "review_scores_communication", "review_scores_location", "review_scores_value", ] for column in fill_avg_columns: df[column].fillna(df[column].mean(), inplace=True) return df sea_listings = clean_listing_df(sea_listings) bos_listings = clean_listing_df(bos_listings) # # Answers question on Udacity # Pose at least three questions related to business or real-world applications of how the data could be used. # ## Q.1 Try to understand how much `AirBNB` homes are earning in certain `time frames` and `areas/ cities`. # >- The daily average price between 2 cities. # >>> Look at the following code and its result, we can see that "the average price in Seattle is lower than in Boston", but the total_revenue at Seattle is nearly approximate at in Boston. 
The second chart show that there are more reservations in Seattle than in Boston calendar_df_avg_daily = ( calendar_df.groupby(["date", "city"]).mean()[["price"]].reset_index() ) calendar_df_avg_daily = pd.pivot( calendar_df_avg_daily, values="price", index=["date"], columns="city" ) calendar_df_sum_daily = ( calendar_df.groupby(["date", "city"]).sum()[["price"]].reset_index() ) calendar_df_sum_daily = pd.pivot( calendar_df_sum_daily, values="price", index=["date"], columns="city" ) fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(23, 12)) calendar_df_avg_daily.plot( title="Average reservation_price at Seattle and Boston", ax=ax[0] ) calendar_df_sum_daily.plot( title="Total reservation_price at Seattle and Boston", ax=ax[1] ) plt.show() # Moreover, loot at the `average_price`, we have seen that the listing prices raise significantly in summer at `Seattle` and in autumn at Boston, probably, because of the fact that there are less listings available for reservation. There is also a raise in December at `Seattle` and in May at `Boston`. This tells us that summer and winter holidays should be the busiest times to visit `Seattle`, also autumn to visit `Boston`. # - So which certained time that the time-series attain highest and lowest in each charts? # >> The `avg_price` is lowest in `01, Apr 2016` at `Seattle` and in `06 Sep 2016` at `Boston` # >> The `avg_price` is highest in `02, Jan 2017` at `Seattle` and in `05 Sep 2017` at `Boston` pd.concat( [ calendar_df_avg_daily["boston"].dropna().reset_index().agg(["min", "max"]), calendar_df_avg_daily["seattle"].dropna().reset_index().agg(["min", "max"]), ] ) # The highest and lowest total prices per day at each city is the same with its average values pd.concat( [ calendar_df_sum_daily["boston"].dropna().reset_index().agg(["min", "max"]), calendar_df_sum_daily["seattle"].dropna().reset_index().agg(["min", "max"]), ] ) # >- The monthly average price between 2 cities. # The diagrams below shows us the prices depending on month. These graphs will demonstrate more detail than in the graph of `average_price per day` # >> At `Boston`, Jan2017 - Mar2017 is the time that the average price is lowest # >> At `Seattle`, Jan2016 - Mar2016 is the time that the average price is lowest fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(23, 9)) for idx, city in enumerate(["seattle", "boston"]): city_calendar_df = ( calendar_df[calendar_df.city == city] .set_index("date") .resample("M") .mean()[["price"]] .reset_index() ) city_calendar_df["month-year"] = city_calendar_df["date"].dt.month_name().apply( lambda x: str(x)[:3] ) + city_calendar_df["date"].dt.year.apply(lambda x: str(x)) city_calendar_df.plot( x="month-year", color=colors[idx], y="price", kind="barh", rot=45, ax=ax[idx], title=city, ) for p in ax[idx].patches: ax[idx].annotate( int(p.get_width()), (p.get_width(), p.get_y() + p.get_height() / 2), color="black", bbox=dict(boxstyle="round,pad=0.3", fc="violet", alpha=0.3), horizontalalignment="right", ) # >- Quarterly observed. # >> `1st quarter` is the quarter that the average prices is lowest in both `Seattle` (125 at `Q1-2016` and `136` at `Q1-2017`) and `Boston` (181 at `Q1-2017`). # >> `3rd quarter` is the quarter that the average prices is highest in both `Seattle` (148 at `Q3-2016`) and `Boston` (252 at `Q1-2017`). 
# >> Again, the quarterly_average_price in `Seattle` is lower than in `Boston` fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(23, 6)) for idx, city in enumerate(["seattle", "boston"]): city_calendar_df = ( calendar_df[calendar_df.city == city] .set_index("date") .resample("Q") .mean()[["price"]] .reset_index() ) city_calendar_df["Quarter"] = ( "Q" + city_calendar_df["date"].dt.quarter.apply(lambda x: str(x)) + "-" + city_calendar_df["date"].dt.year.apply(lambda x: str(x)) ) city_calendar_df.plot( x="Quarter", color=colors[idx], y="price", kind="barh", rot=45, ax=ax[idx], title=city, ) for p in ax[idx].patches: ax[idx].annotate( int(p.get_width()), (p.get_width() / 2, p.get_y() + p.get_height() / 2), color="black", bbox=dict(boxstyle="round,pad=0.3", fc="violet", alpha=0.3), ) # ## Q2. Examine the Total_price by top20 listing_id at each city and its reservation days # Top 20 listing_id in `Seattle` has the longer reservation days but the total_price is lower than in `Boston` fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(23, 18)) ax = ax.ravel() for idx, city in enumerate(["seattle", "boston"]): df = calendar_df.groupby(["city", "listing_id"]).sum()[["price"]].reset_index() df = df[df.city == city].sort_values(by="price", ascending=False)[:20] list_id = df.listing_id df2 = calendar_df[calendar_df.listing_id.isin(list_id)][ ["date", "listing_id", "price"] ] x = ( df2.groupby("listing_id").max()[["date"]] - df2.groupby("listing_id").min()[["date"]] ) df["during_days"] = (x.astype("timedelta64[h]") // 24).astype(float).values.ravel() df.plot( x="listing_id", y="price", kind="barh", color="c", title=city + "_top20_listing_id take highest total_price", ax=ax[2 * idx], ) for p in ax[2 * idx].patches: ax[2 * idx].annotate( int(p.get_width()), (p.get_width(), p.get_y() + p.get_height() / 2), color="black", bbox=dict(boxstyle="round,pad=0.3", fc="violet", alpha=0.3), ) df[["listing_id", "during_days"]].plot( x="listing_id", y="during_days", kind="barh", color="y", title="Corresponding reservation_days" + city, ax=ax[2 * idx + 1], ) for p in ax[2 * idx + 1].patches: ax[2 * idx + 1].annotate( int(p.get_width()), (p.get_width() - 90, p.get_y() + p.get_height() / 2), bbox=dict(boxstyle="round,pad=0.3", fc="violet", alpha=0.3), ) # And how about the `average_price` by `Number of listing_id`? # >> The majority of listings is concentrated around `50 - 150 USD` in `Seattle` and about `50-250 $` in `Boston` fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(23, 11)) for idx, city in enumerate(["seattle", "boston"]): df = calendar_df[calendar_df.city == city] mean_price_for_listing = df.groupby("listing_id").mean()["price"] ax[idx].hist(mean_price_for_listing, color="c", bins=50) ax[idx].set_xticks(np.arange(0, df["price"].max(), step=100 * (1 + idx))) ax[idx].set_ylabel("Number of listings_id") ax[idx].set_xlabel("Price (in $)") ax[idx].set_title("Number of listings depending on price, at " + city) # ## Q3. Try to understand if there is anything about the properties that helps you predict price, based on the 2 datasets: `boston_listings` and `seattle_listings`. 
# First, we must examine the common-features between this 2 dataset: `boston_listings` and `seattle_listings` bos_cols_ls = sorted(list(bos_listings.columns)) sea_cols_ls = sorted(list(sea_listings.columns)) same = set(bos_cols_ls).intersection(set(sea_cols_ls)) print(sorted(list(same))) len(list(same)) columns_to_drop = [ "available", "host_id", "host_location", "host_acceptance_rate", "host_neighbourhood", "host_total_listings_count", "weekly_price", "monthly_price", "security_deposit", "cleaning_fee", "calendar_updated", "listing_url", "last_scraped", "scrape_id", "name", "summary", "space", "description", "experiences_offered", "street", "neighbourhood", "neighbourhood_cleansed", "zipcode", "neighborhood_overview", "notes", "transit", "thumbnail_url", "medium_url", "picture_url", "xl_picture_url", "host_url", "host_name", "host_about", "host_thumbnail_url", "host_picture_url", "city", "state", "market", "country_code", "country", "latitude", "longitude", "is_location_exact", "has_availability", "calendar_last_scraped", "first_review", "last_review", "requires_license", "license", "jurisdiction_names", "price_y", "reviews_per_month", ] same = set(same).difference(set(columns_to_drop)) print(sorted(list(same))) # When forecasting the price of house, we have seen that these feature can be strongly affect to the price are # >> Numeric features: `'bathrooms'`, `'beds'`, `'square_feet'`, `'availability_30'`, `'availability_365'`, `'availability_60'`, `'availability_90'`, `'number_of_reviews'`, `'accommodates'`, `'review_scores_rating'`, `review_scores_location'` and `'host_listings_count'` # >> Category features: `'bed_type'`, `'smart_location'`, `'property_type'`, `'room_type'` and may be `'neighbourhood_group_cleansed'` numr_feat = sorted( [ "bathrooms", "beds", "square_feet", "availability_30", "availability_365", "availability_60", "availability_90", "number_of_reviews", "accommodates", "review_scores_rating", "extra_people_fee", "price", "review_scores_location", "host_listings_count", ] ) cate_feat = [ "bed_type", "smart_location", "property_type", "room_type", "neighbourhood_group_cleansed", ] # - Check again which features in the numeric_features can be influenced the prices # To solve this, I will use the heatmap on the correlation matrix. import seaborn as sns bos_listings = bos_listings[same] sea_listings = sea_listings[same] dfs = [bos_listings, sea_listings] cities = ["Boston", "Seattle"] fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(23, 11)) for idx, df in enumerate(dfs): corrs = df[numr_feat].corr() sns.heatmap(corrs, cbar=True, annot=True, ax=ax[idx]) ax[idx].set_title("Correlations heatmap at " + cities[idx]) # So, # >- At `Seattle`, the numeric features that strongly correlated to `price` be : `"accomodates"`: 0.67, `"beds"` : 0.59, and `"bathrooms`: 0.52; the other features is weakly-correlated to `price`. # >- At `Boston`, the numeric features that strongly correlated to `price` be : `"accomodates"`: 0.44, `"beds"` : 0.33, next be "bathrooms", "review_scores_rating" and "availability_30". The `"availability_30"` at `Boston` is about `0.13` shows that the reservation in this city is almost no longer than 1 month. # Now, how about the category features? 
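# Before moving on to the categorical features just raised, the correlation values quoted above can be read off
# directly without the heatmap. This is a minimal cross-check, reusing `dfs`, `cities` and `numr_feat` exactly as
# defined in the cells above (pandas/seaborn are already imported); it is not part of the original analysis.
for city_name, listing_frame in zip(cities, dfs):
    corr_with_price = (
        listing_frame[numr_feat].corr()["price"].drop("price").sort_values(ascending=False)
    )
    print(city_name)
    print(corr_with_price.round(2).to_string())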
dfs = [bos_listings, sea_listings] cities = ["Boston", "Seattle"] fig, ax = plt.subplots(nrows=4, ncols=2, figsize=(23, 19)) ax = ax.ravel() for k in range(2): df = dfs[k][cate_feat] for q in range(len(cate_feat) - 1): z = dfs[k].groupby(cate_feat[q]).mean()[["price"]] z.sort_values(by="price", ascending=False)[:10].reset_index().plot( x=cate_feat[q], y="price", title=cities[k] + " " + cate_feat[q], color="y", kind="barh", ax=ax[q * 2 + k], ) # This is easy to see that: # >> In the column `bed_type`, the value `Real_bed` has the highest average_price at both cities, while the `Airbed` is lowest in `Boston` and `Couch` is lowest in `Seattle`. # >> In the column `room_type`, the value `Entire home/apt` has the highest average_price at both cities, and obviously the `shared_room` is lowest. # >> In the column `property_type`, the value `Guesthouse` has the highest average_price in `Boston` while in `Seattle` is `Boat`, the lowest average_price in `Boston` is `Bed & breakfast` while `Cabin` is the lowest in `Seattle`. # **But, we must consider carefully the last features: `neighbourhood_group_cleansed`** # In `Boston` city, this feature totally contains the missing values, indeed; pd.DataFrame( bos_listings[cate_feat].isnull().sum() / bos_listings.shape[0] * 100 ).rename(columns={0: "percentage of null"}) # Hence, we must drop the column `neighbourhood_group_cleansed`, but do you wonder how it affect to the average_price in `Seattle`? Look at the chart below! fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 5)) sea_listings.groupby("neighbourhood_group_cleansed").mean()[["price"]].plot( kind="bar", rot=45, color="y", ax=ax ) for p in ax.patches: ax.annotate(str(p.get_height())[:5], (p.get_x() * 1.005, p.get_height() * 1.005)) # On the diagram above (at `Seattle` city) we can see that prices differ depending on the location (neighbourhood). The highest average prices are in 3 areas: `Magnolia`, `Queen Anne` and `Downtown`; while the lowest is in `Delridge`, which is not surprising # #### predict price # Remind that we will drop the column `neighbourhood_group_cleansed`, we are able to apply `Machine learning methods` to see which features in dataset influence the price the most. In order to do this we will train two popular models (`Random Forest regressor [bagging]` and `Gradient Boosting regressor [boosting]`) based on decision trees and look at resulting feature importances. 
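# A side note before the helper below: both models are fit with fixed hyper-parameters. If the over-fitting
# discussed after the results needs addressing, a small grid search is one option. This is only a hedged sketch —
# the grid values are illustrative assumptions, and the fit is left commented out because `X_train`/`y_train`
# are built inside the `display` helper rather than at the top level.
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV

rf_param_grid = {"n_estimators": [100, 200], "max_depth": [5, 9, 13]}  # illustrative grid, not tuned here
rf_search = GridSearchCV(
    RandomForestRegressor(random_state=42, n_jobs=-1),
    rf_param_grid,
    scoring="r2",
    cv=3,
)
# rf_search.fit(X_train, y_train)   # X_train / y_train are created inside `display` below
# print(rf_search.best_params_, rf_search.best_score_)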
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import ( mean_squared_error as mse, r2_score, mean_absolute_error as mae, ) def mape(actual, predict): return np.mean(np.abs(actual - predict) / actual) clfs = [ RandomForestRegressor( n_estimators=200, max_depth=9, criterion="mse", random_state=42, n_jobs=-1 ), GradientBoostingRegressor( n_estimators=200, learning_rate=0.05, subsample=0.8, max_depth=7 ), ] def display(df): methods = ["Random Forest", "Gradient Boosting"] cat_columns = list(df.select_dtypes(include=["object"]).columns) df_cate = df[cat_columns] X = pd.concat( [ df[numr_feat], pd.get_dummies(df_cate, prefix_sep="_is_", dummy_na=True, drop_first=True), ], axis=1, ).drop(columns="price") y = df.price X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=42 ) fig, ax = plt.subplots( nrows=3, ncols=2, figsize=(25, 17.5), gridspec_kw={"height_ratios": [2, 3, 5]} ) ax = ax.ravel() for k, clf in enumerate(clfs): model = clf.fit(X_train, y_train) feature_importance = 100 * model.feature_importances_ sorted_idx = np.argsort(feature_importance) dat = pd.DataFrame( { "index": list(x[:20] + "..." for x in X_train.columns[sorted_idx]), "feature importance": feature_importance[sorted_idx], } )[-10:] train_pred = model.predict(X_train) test_pred = model.predict(X_test) # plot scor = pd.DataFrame( { "score_names": ["MAPE", "R2_score"], "training": [mape(y_train, train_pred), r2_score(y_train, train_pred)], "test_set": [mape(y_test, test_pred), r2_score(y_test, test_pred)], } ).set_index("score_names") scor.plot(ax=ax[k], kind="barh") err = ( pd.DataFrame( { "score_names": ["MAE", "MSE", "RMSE"], "training": [ mae(y_train, train_pred), mse(y_train, train_pred), np.sqrt(mse(y_train, train_pred)), ], "test_set": [ mae(y_test, test_pred), mse(y_test, test_pred), np.sqrt(mse(y_test, test_pred)), ], } ) .set_index("score_names") .plot(ax=ax[k + 2], kind="barh") ) dat.set_index("index").plot( ax=ax[k + 4], rot=45, title="Top10 feature importace using " + methods[k], kind="barh", ) # > Forecasting in Seattle. display(dfs[1]) # >- And `Boston` display(dfs[0]) # #### Comments on Question 3. # These scores indicated that the model make `overfitting`, we must remove some features is not related / not correlated the price. # Moreover, to find the best `r2_score`, we can use grid_search CV / hyper-tuning parameters to solve this # ## Q4. Can you find negative and positive reviews based on text? # Firstly, viewing each comment line by line is `impossible` for k in range(10): print(k, 29 * "-") print("Seattle: \t", sea_reviews.comments[k]) print("Boston: \t", bos_reviews.comments[k]) # - We saw that the keywords in the first 10 comments is # >- "Perfect", "great", "beautiful", "well", "hola", "pleasant" etc... to describe the `positive` # >- "issue", "terrible", "stupid".... 
for `negative` # But I want to look over them, so I will use N-grams analytics on the top 100 of `uni-grams`, `bi-grams` and `tri-grams` to solve this bos_reviews["city"] = "boston" sea_reviews["city"] = "seattle" df_reviews = pd.concat([bos_reviews, sea_reviews]) df_reviews.head() # #### Now, look at the code below # First is the `Uni-gram`, from the charts below, we have seen that # >- The words `great` is appeared almost in the comments_review in both cities; # >- The second and the third popular words is `stay` and `place` in both cities # >- But the next 4th popular word is `us` in `Seattle` while in `Boston` was `apartment` # >- etc. from wordcloud import STOPWORDS from collections import defaultdict import string def ngrams_generative(text, n_grams=1): token = [ token for token in str(text).lower().split(" ") if token != "" if token not in STOPWORDS ] ngrams = zip(*[token[i:] for i in range(n_grams)]) return [" ".join(ngram) for ngram in ngrams] def Ngram_topN(dataset, n_grams, top_N=100): bos_ngrams = defaultdict(int) sea_ngrams = defaultdict(int) is_sea = dataset["city"] == "seattle" for mes in dataset[is_sea]["comments"]: for word in ngrams_generative(mes, n_grams): sea_ngrams[word] += 1 for mes in dataset[~is_sea]["comments"]: for word in ngrams_generative(mes, n_grams): bos_ngrams[word] += 1 sea_ngrams = pd.DataFrame(sorted(sea_ngrams.items(), key=lambda x: x[1])[::-1]) bos_ngrams = pd.DataFrame(sorted(bos_ngrams.items(), key=lambda x: x[1])[::-1]) fig, ax = plt.subplots(ncols=2, figsize=(20, (n_grams + 1) * top_N // 10), dpi=100) plt.tight_layout() sns.barplot( y=sea_ngrams[0].values[:top_N], x=sea_ngrams[1].values[:top_N], ax=ax[0], color="red", ) sns.barplot( y=bos_ngrams[0].values[:top_N], x=bos_ngrams[1].values[:top_N], ax=ax[1], color="green", ) for i in range(2): ax[i].spines["right"].set_visible(False) ax[i].set_xlabel("") ax[i].set_ylabel("") ax[i].tick_params(axis="x", labelsize=12) ax[i].tick_params(axis="y", labelsize=12) Ngram_used = {1: "unigram", 2: "bigrams", 3: "trigrams"} ax[0].set_title( f"Top {top_N} most common {Ngram_used[n_grams]} of comments_review in Seattle", fontsize=15, ) ax[1].set_title( f"Top {top_N} most common {Ngram_used[n_grams]} of comments_review in Boston", fontsize=15, ) Ngram_topN(df_reviews, 1, 75) # How about the `Bi-grams` of the `comments` # >- Wow, the most 4 popular `bi-grams` in both cities is the same : `definitely stay`, `highly recommended`, `walking distance` and `stay again` # >- Almost the top 75-bigrams reflect the positive comments with the words like: `highly recommended`, `great location`, `great place`, `great host`, `great location`, `clean well`. Ngram_topN(df_reviews, 2, 75) # How about the `Tri-grams` of the `comments` Ngram_topN(df_reviews, 3, 100) # => From this, we can examine easier which words meant positve and negative in the `comments_reviews`. # ## Q5. Make 2 time-series model to forecasting the difference of the reservation_prices between 2 cities # Our dataset is in 1 year only, we can make a ARIMA, SARIMA, SARIMAX model or something like this. # But here, in this case, we can use a assumption of Markov properties: `the price of present is depended on the avg_price of the previous 3 days or 1 week` and I will use another Machine Learning models like `Random Forest regressor` and `Gradient Boosting` # #### Step 1. 
Making a rolling-window # Here, I let the `window_size = 3` from statsmodels.tsa.arima_model import ARIMA cities = ["boston", "seattle"] df = calendar_df[calendar_df.city == cities[0]] df["month"] = df["date"].dt.month df = df.set_index("date")[["month", "price"]] df = df.resample("D").mean() window_size = 3 df["previous_roll_avg"] = ( df["price"].rolling(window_size, win_type="gaussian").mean(std=2) ) df["previous_roll_std"] = df["price"].rolling(window_size).std() df.head(10) # #### Making a shifted-datetime # For a fixed `window_size = 3`, I will establish a model that based on the **3 previous days** for predicting the average_price today, so we must **shift** the rolling_window by 1 steps! Look at the following output # For example, we will forecast the price of `04 Jul 2017` based on the prices in `01 - 03, Jul 2017` df[["previous_roll_avg", "previous_roll_std"]] = df[ ["previous_roll_avg", "previous_roll_std"] ].shift(periods=1) df["previous k_days price"] = df["price"].shift(periods=window_size) df.head(10) # #### Replacing the missing_values by its averages # Here, we will replace the missing values (`NaN`) by the averages values at each column that contains missing values. df = df.fillna(df.mean()) df[["price", "previous_roll_avg"]].plot(figsize=(10, 5)) # #### Making a model # Now, I create `Random Forest` to make a forecasting model! X = df.drop(columns="price") y = df.price X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33) clfs = [ RandomForestRegressor(n_estimators=200, max_depth=9, criterion="mse", n_jobs=-1), GradientBoostingRegressor( n_estimators=200, learning_rate=0.05, subsample=0.8, max_depth=7 ), ] clf = clfs[0] clf.fit(X_train, y_train) preds = y_test.reset_index() preds["forecast"] = clf.predict(X_test) preds.set_index("date").plot(figsize=(20, 5)) r2_score(y_test, clf.predict(X_test)) # So, the `r2_score` is about 0.88 when using `Random forest regressor` # #### Wraping up together ## input window_size = 3 df = calendar_df[calendar_df.city == cities[1]] ## function : def display_timeseries(window_size, city): df = calendar_df[calendar_df.city == city] df["month"] = df["date"].dt.month df = df.set_index("date")[["month", "price"]] df = df.resample("D").mean() df["previous_roll_avg"] = ( df["price"].rolling(window_size, win_type="gaussian").mean(std=2) ) df["previous_roll_std"] = df["price"].rolling(window_size).std() df[["previous_roll_avg", "previous_roll_std"]] = df[ ["previous_roll_avg", "previous_roll_std"] ].shift(periods=1) df["previous k_days price"] = df["price"].shift(periods=window_size) df = df.fillna(df.mean()) X = df.drop(columns="price") y = df.price X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33) clfs = [ RandomForestRegressor( n_estimators=200, max_depth=9, criterion="mse", n_jobs=-1 ), GradientBoostingRegressor( n_estimators=200, learning_rate=0.05, subsample=0.8, max_depth=7 ), ] methods = ["Random forest regressor", "Gradient Boosting"] fig, ax = plt.subplots( nrows=5, ncols=2, figsize=(24, 35), gridspec_kw={"height_ratios": [2, 3, 2.5, 2, 2]}, ) ax = ax.ravel() for k, model in enumerate(clfs): model.fit(X_train, y_train) feature_importance = 100 * model.feature_importances_ sorted_idx = np.argsort(feature_importance) dat = pd.DataFrame( { "index": list(x[:20] + "..." 
for x in X_train.columns[sorted_idx]),
                "feature importance": feature_importance[sorted_idx],
            }
        )
        train_pred = model.predict(X_train)
        test_pred = model.predict(X_test)
        # plot
        scor = pd.DataFrame(
            {
                "score_names": ["MAPE", "R2_score"],
                "training": [mape(y_train, train_pred), r2_score(y_train, train_pred)],
                "test_set": [mape(y_test, test_pred), r2_score(y_test, test_pred)],
            }
        ).set_index("score_names")
        scor.plot(
            ax=ax[k],
            title="R2_score & MAPE using " + methods[k] + " at " + city,
            kind="barh",
        )
        err = (
            pd.DataFrame(
                {
                    "score_names": ["MAE", "MSE", "RMSE"],
                    "training": [
                        mae(y_train, train_pred),
                        mse(y_train, train_pred),
                        np.sqrt(mse(y_train, train_pred)),
                    ],
                    "test_set": [
                        mae(y_test, test_pred),
                        mse(y_test, test_pred),
                        np.sqrt(mse(y_test, test_pred)),
                    ],
                }
            )
            .set_index("score_names")
            .plot(ax=ax[k + 2], kind="barh")
        )
        dat.set_index("index").plot(
            ax=ax[k + 4],
            rot=45,
            title="Feature importance using " + methods[k],
            kind="barh",
        )
        preds = y_train.reset_index()
        preds["forecast"] = model.predict(X_train)
        preds.set_index("date").plot(
            ax=ax[k + 6], title="forecasting on training set using " + methods[k]
        )
        preds = y_test.reset_index()
        preds["forecast"] = model.predict(X_test)
        preds.set_index("date").plot(
            ax=ax[k + 8], title="forecasting on test set using " + methods[k]
        )


display_timeseries(3, city=cities[1])
# Yeah! The forecasting model is pretty good in `Seattle`: the `R2_score` is over 0.85 and the `MAPE` is below 0.02. And how about `Boston`?
# >> On the training set the fit is nearly exact with `Gradient Boosting`, but the `R2_score` on the `test_set` is better with `Random Forest`.
# >> An `MAE` below 8 for both models suggests the approach is reasonable (recall that the average price in `Boston` is above `$180`).
display_timeseries(3, city=cities[0])
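# One caveat worth flagging: `train_test_split` above shuffles the daily rows at random. The sketch below — an
# assumption-laden aside, not part of the original pipeline — evaluates the same kind of model on a chronological
# hold-out (the 60-day cut-off is arbitrary), reusing the stand-alone `X`, `y` frames built for Boston earlier in Q5.
cutoff_date = X.index.max() - pd.Timedelta(days=60)  # assumed hold-out length
X_tr, X_te = X[X.index <= cutoff_date], X[X.index > cutoff_date]
y_tr, y_te = y[y.index <= cutoff_date], y[y.index > cutoff_date]
rf_chrono = RandomForestRegressor(n_estimators=200, max_depth=9, n_jobs=-1)
rf_chrono.fit(X_tr, y_tr)
print("chronological hold-out R2:", r2_score(y_te, rf_chrono.predict(X_te)))
# The wrapper above can also be re-run with a weekly window, e.g. display_timeseries(7, city=cities[1])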
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/549/69549867.ipynb
null
null
[{"Id": 69549867, "ScriptId": 18797262, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4182544, "CreationDate": "08/01/2021 11:58:44", "VersionNumber": 6.0, "Title": "udacity_nhandv6", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 698.0, "LinesInsertedFromPrevious": 56.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 642.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
11,788
0
11,788
11,788
69549880
## installing packages # imports import gc import os import shutil import pandas as pd import numpy as np from pathlib import Path ## define configuration PATH_TRAIN = "../input/tabular-playground-series-aug-2021/train.csv" PATH_TEST = "../input/tabular-playground-series-aug-2021/test.csv" time_limit = 20000 # (in seconds; for a quick trial run, set to 300, which gave a public score of ~10) train = pd.read_csv(PATH_TRAIN, index_col="id") test = pd.read_csv(PATH_TEST, index_col="id") target_loss = train.loss train.drop(["loss"], axis=1, inplace=True) train.head() ## imports from autogluon.tabular import TabularPredictor, TabularDataset ## fit AutoGluon on the loss target train["target"] = target_loss model_ag = TabularPredictor(label="target") model_ag.fit( train_data=train, time_limit=time_limit, presets="best_quality", num_stack_levels=3, num_bag_folds=5, num_bag_sets=1, ) del train["target"] ## check the model leaderboard model_ag.leaderboard() preds_autogluon = model_ag.predict(TabularDataset(test)) ## create submission submission = pd.DataFrame({"id": preds_autogluon.index, "loss": preds_autogluon}) submission.head() ## save submission PATH_AUTOGLUON_SUBMISSION = "submission_autogluon_" + str(time_limit) + ".csv" PATH_AUTOGLUON_SUBMISSION submission.to_csv(PATH_AUTOGLUON_SUBMISSION, index=False) ## clear memory shutil.rmtree("AutogluonModels") gc.collect()
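# #### Optional sanity check before submitting (not part of the original notebook)
# A small sketch, assuming the competition ships its usual sample_submission.csv with `id` and `loss`
# columns, that the saved AutoGluon predictions line up with it row for row. It reuses
# PATH_AUTOGLUON_SUBMISSION from the cell above, so it only runs after the notebook has produced the file.
import pandas as pd

sample = pd.read_csv("../input/tabular-playground-series-aug-2021/sample_submission.csv")
sub = pd.read_csv(PATH_AUTOGLUON_SUBMISSION)

assert len(sub) == len(sample), "row count mismatch"
assert (sub["id"].values == sample["id"].values).all(), "id order mismatch"
assert sub["loss"].notna().all(), "NaN predictions present"
print(sub["loss"].describe())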
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/549/69549880.ipynb
null
null
[{"Id": 69549880, "ScriptId": 18989799, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 469902, "CreationDate": "08/01/2021 11:58:52", "VersionNumber": 2.0, "Title": "[TPS Aug 2021] Autogluon 101", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 67.0, "LinesInsertedFromPrevious": 7.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 60.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
null
null
null
null
false
0
468
4
468
468
69549563
<jupyter_start><jupyter_text>New Plant Diseases Dataset **This dataset is recreated using offline augmentation from the original dataset. The original dataset can be found on [this][1] github repo. This dataset consists of about 87K rgb images of healthy and diseased crop leaves which is categorized into 38 different classes. The total dataset is divided into 80/20 ratio of training and validation set preserving the directory structure. A new directory containing 33 test images is created later for prediction purpose.** [1]: https://github.com/spMohanty/PlantVillage-Dataset Kaggle dataset identifier: new-plant-diseases-dataset <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session def res_identity(x, filters): # renet block where dimension doesnot change. # The skip connection is just simple identity conncection # we will have 3 blocks and then input will be added x_skip = x # this will be used for addition with the residual block f1, f2 = filters # first block x = Conv2D( f1, kernel_size=(1, 1), strides=(1, 1), padding="valid", kernel_regularizer=l2(0.001), )(x) x = BatchNormalization()(x) x = Activation(activations.relu)(x) # second block # bottleneck (but size kept same with padding) x = Conv2D( f1, kernel_size=(3, 3), strides=(1, 1), padding="same", kernel_regularizer=l2(0.001), )(x) x = BatchNormalization()(x) x = Activation(activations.relu)(x) # third block activation used after adding the input x = Conv2D( f2, kernel_size=(1, 1), strides=(1, 1), padding="valid", kernel_regularizer=l2(0.001), )(x) x = BatchNormalization()(x) # x = Activation(activations.relu)(x) # add the input x = Add()([x, x_skip]) x = Activation(activations.relu)(x) return x def res_conv(x, s, filters): """ here the input size changes""" x_skip = x f1, f2 = filters # first block x = Conv2D( f1, kernel_size=(1, 1), strides=(s, s), padding="valid", kernel_regularizer=l2(0.001), )(x) # when s = 2 then it is like downsizing the feature map x = BatchNormalization()(x) x = Activation(activations.relu)(x) # second block x = Conv2D( f1, kernel_size=(3, 3), strides=(1, 1), padding="same", kernel_regularizer=l2(0.001), )(x) x = BatchNormalization()(x) x = Activation(activations.relu)(x) # third block x = Conv2D( f2, kernel_size=(1, 1), strides=(1, 1), padding="valid", kernel_regularizer=l2(0.001), )(x) x = BatchNormalization()(x) # shortcut x_skip = Conv2D( f2, kernel_size=(1, 1), strides=(s, s), padding="valid", kernel_regularizer=l2(0.001), )(x_skip) x_skip = BatchNormalization()(x_skip) # add x = Add()([x, x_skip]) x = Activation(activations.relu)(x) return x from keras.layers import Input def resnet50(): input_im = Input(shape=(384, 384, 3)) # cifar 10 images size x = ZeroPadding2D(padding=(3, 3))(input_im) # 1st stage # here we perform maxpooling, see the figure above x = Conv2D(64, kernel_size=(7, 7), strides=(2, 2))(x) x = BatchNormalization()(x) x = Activation(activations.relu)(x) 
x = MaxPooling2D((3, 3), strides=(2, 2))(x) # 2nd stage # frm here on only conv block and identity block, no pooling x = res_conv(x, s=1, filters=(64, 256)) x = res_identity(x, filters=(64, 256)) x = res_identity(x, filters=(64, 256)) # 3rd stage x = res_conv(x, s=2, filters=(128, 512)) x = res_identity(x, filters=(128, 512)) x = res_identity(x, filters=(128, 512)) x = res_identity(x, filters=(128, 512)) # 4th stage x = res_conv(x, s=2, filters=(256, 1024)) x = res_identity(x, filters=(256, 1024)) x = res_identity(x, filters=(256, 1024)) x = res_identity(x, filters=(256, 1024)) x = res_identity(x, filters=(256, 1024)) x = res_identity(x, filters=(256, 1024)) # 5th stage x = res_conv(x, s=2, filters=(512, 2048)) x = res_identity(x, filters=(512, 2048)) x = res_identity(x, filters=(512, 2048)) # ends with average pooling and dense connection x = AveragePooling2D((2, 2), padding="same")(x) x = Flatten()(x) x = Dense(38, activation="softmax", kernel_initializer="he_normal")( x ) # multi-class # define the model model = Model(inputs=input_im, outputs=x, name="Resnet50") return model from keras.utils import np_utils from keras.models import Sequential from keras.layers import Convolution2D, Dense, MaxPool2D, Activation, Dropout, Flatten from keras.layers import GlobalAveragePooling2D from keras.optimizers import Adam from sklearn.model_selection import train_test_split from keras.layers.normalization import BatchNormalization import os import pandas as pd import plotly.graph_objs as go import matplotlib.ticker as ticker import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px import cv2 import numpy as np from sklearn.model_selection import train_test_split import glob def get_files(directory): if not os.path.exists(directory): return 0 count = 0 for current_path, dirs, files in os.walk(directory): for dr in dirs: count += len(glob.glob(os.path.join(current_path, dr + "/*"))) return count train_dir = "/kaggle/input/new-plant-diseases-dataset/New Plant Diseases Dataset(Augmented)/New Plant Diseases Dataset(Augmented)/train" test_dir = "/kaggle/input/new-plant-diseases-dataset/New Plant Diseases Dataset(Augmented)/New Plant Diseases Dataset(Augmented)/valid" os.environ["CUDA_VISIBLE_DEVICES"] = "0,1" train_samples = get_files(train_dir) num_classes = len(glob.glob(train_dir + "/*")) test_samples = get_files(test_dir) print(num_classes, "Classes") print(train_samples, "Train images") print(test_samples, "Test images") from keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) test_datagen = ImageDataGenerator(rescale=1.0 / 255) img_width, img_height = 384, 384 input_shape = (img_width, img_height, 3) batch_size = 32 train_generator = train_datagen.flow_from_directory( train_dir, target_size=(img_width, img_height), batch_size=batch_size ) test_generator = test_datagen.flow_from_directory( test_dir, shuffle=True, target_size=(img_width, img_height), batch_size=batch_size ) from tensorflow.keras.layers import ( Input, Conv2D, MaxPooling2D, ZeroPadding2D, Flatten, BatchNormalization, AveragePooling2D, Dense, Activation, Add, ) from tensorflow.keras.models import Model from tensorflow.keras import activations from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.regularizers import l2 from keras.applications.resnet50 import ResNet50 from keras.models import Model import keras from keras import optimizers 
model_finetuned = resnet50() model_finetuned.summary() from keras.callbacks import ReduceLROnPlateau validation_generator = test_datagen.flow_from_directory( test_dir, target_size=(img_height, img_width), batch_size=batch_size ) from keras import callbacks model_finetuned.compile( optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] ) earlystopping = callbacks.EarlyStopping( monitor="val_loss", mode="min", restore_best_weights=True ) history_1 = model_finetuned.fit( train_generator, steps_per_epoch=None, epochs=8, validation_data=validation_generator, validation_steps=None, verbose=1, callbacks=[ ReduceLROnPlateau(monitor="val_loss", factor=0.3, patience=3, min_lr=0.000001), earlystopping, ], use_multiprocessing=False, shuffle=True, ) from keras.models import Sequential from keras.layers import Dense import matplotlib.pyplot as plt import numpy print(history_1.history.keys()) plt.plot(history_1.history["accuracy"]) plt.plot(history_1.history["val_accuracy"]) plt.title("model accuracy") plt.ylabel("accuracy") plt.xlabel("epoch") plt.legend(["train", "test"], loc="upper left") plt.show() # summarize history for loss plt.plot(history_1.history["loss"]) plt.plot(history_1.history["val_loss"]) plt.title("model loss") plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["train", "test"], loc="upper left") plt.show() from keras.models import load_model model_finetuned.save("plantddusingresnet50trainfalse.h5") from keras.models import load_model model = load_model("plantddusingresnet50trainfalse.h5") classes = list(train_generator.class_indices.keys()) import numpy as np import matplotlib.pyplot as plt # Pre-Processing test data same as train data. img_width = 256 img_height = 256 model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) from keras.preprocessing import image def prepare(img_path): img = image.load_img(img_path, target_size=(384, 384)) x = image.img_to_array(img) x = x / 255 return np.expand_dims(x, axis=0) result = model.predict( [ prepare( "/kaggle/input/new-plant-diseases-dataset/test/test/TomatoYellowCurlVirus6.JPG" ) ] ) disease = image.load_img( "/kaggle/input/new-plant-diseases-dataset/test/test/TomatoYellowCurlVirus6.JPG" ) plt.imshow(disease) print(result) import numpy as np classresult = np.argmax(result, axis=1) print(classes[classresult[0]])
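# #### Spatial-size arithmetic for the hand-built ResNet-50 (illustrative, not from the original notebook)
# In the blocks defined above, res_conv downsamples with its stride-2 1x1 convolution while res_identity
# keeps the feature-map size. This rough sketch traces how the 384x384 input shrinks through the stem and
# the three downsampling stages, using the usual 'valid' convolution formula out = (in + 2*pad - k) // stride + 1.
def conv_out(size, kernel, stride, pad=0):
    return (size + 2 * pad - kernel) // stride + 1

size = 384
size = conv_out(size, kernel=7, stride=2, pad=3)  # stem: ZeroPadding2D(3) then 7x7/2 conv -> 192
print("after stem conv:", size)
size = conv_out(size, kernel=3, stride=2)  # 3x3/2 max pooling -> 95
print("after max pooling:", size)
for stage in ("stage 3", "stage 4", "stage 5"):  # stage 2 uses s=1, so it does not downsample
    size = conv_out(size, kernel=1, stride=2)  # the stride-2 1x1 conv inside res_conv
    print("after", stage + ":", size)  # 48, 24, 12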
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/549/69549563.ipynb
new-plant-diseases-dataset
vipoooool
[{"Id": 69549563, "ScriptId": 18987625, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5587444, "CreationDate": "08/01/2021 11:54:46", "VersionNumber": 1.0, "Title": "resnet50study", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 271.0, "LinesInsertedFromPrevious": 271.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 92864528, "KernelVersionId": 69549563, "SourceDatasetVersionId": 182633}]
[{"Id": 182633, "DatasetId": 78313, "DatasourceVersionId": 193494, "CreatorUserId": 2009285, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "11/18/2018 07:09:16", "VersionNumber": 2.0, "Title": "New Plant Diseases Dataset", "Slug": "new-plant-diseases-dataset", "Subtitle": "Image dataset containing different healthy and unhealthy crop leaves.", "Description": "**This dataset is recreated using offline augmentation from the original dataset. The original dataset can be found on [this][1] github repo. This dataset consists of about 87K rgb images of healthy and diseased crop leaves which is categorized into 38 different classes. The total dataset is divided into 80/20 ratio of training and validation set preserving the directory structure.\nA new directory containing 33 test images is created later for prediction purpose.**\n\n\n [1]: https://github.com/spMohanty/PlantVillage-Dataset", "VersionNotes": "New Test Images", "TotalCompressedBytes": 1445887779.0, "TotalUncompressedBytes": 1445887779.0}]
[{"Id": 78313, "CreatorUserId": 2009285, "OwnerUserId": 2009285.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 182633.0, "CurrentDatasourceVersionId": 193494.0, "ForumId": 87652, "Type": 2, "CreationDate": "11/16/2018 12:17:57", "LastActivityDate": "11/16/2018", "TotalViews": 387678, "TotalDownloads": 47287, "TotalVotes": 766, "TotalKernels": 244}]
[{"Id": 2009285, "UserName": "vipoooool", "DisplayName": "Samir Bhattarai", "RegisterDate": "06/21/2018", "PerformanceTier": 0}]
false
0
3,088
0
3,239
3,088
69549650
# # CREDITS "https://www.kaggle.com/alexryzhkov/tps-lightautoml-baseline-with-pseudolabels" "https://www.kaggle.com/alekseyromanovich/leaked-data" # The idea is to take the outputs of different trained models and average them to get a better result # you can either train and collect all the outputs yourself, or you can use submissions from others and average them; both ways work. import numpy as np  # linear algebra import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv) from sklearn.metrics import mean_absolute_error # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk("/kaggle/input"): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session sample_submission = pd.read_csv( "/kaggle/input/tabular-playground-series-jul-2021/sample_submission.csv" ) sample_submission.shape model_1 = pd.read_csv( "/kaggle/input/tps-eda-lstm-pseudolabels-interpolate/Submission.csv" ) model_2 = pd.read_csv( "/kaggle/input/k/alexryzhkov/k/alexryzhkov/tps-lightautoml-baseline-with-pseudolabels/lightautoml_with_pseudolabelling_kernel_version_16.csv" ) total_data = pd.concat([model_1, model_2]).groupby(level=0).mean() total_data # please upvote if you found it useful. sample_submission.drop("date_time", axis=1, inplace=True) # adding the date column total_data["date_time"] = model_2["date_time"] final_data = total_data[ ["date_time", "target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"] ] final_data.to_csv("submission_.csv", index=False)
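# #### Toy illustration of the blending line (made-up numbers, not competition data)
# pd.concat on two submission frames that share the same RangeIndex stacks them with duplicated index
# labels, and groupby(level=0).mean() then averages them row by row. Non-numeric columns such as
# date_time get dropped by the mean, which is presumably why the notebook re-attaches date_time afterwards.
import pandas as pd

a = pd.DataFrame({"target_benzene": [1.0, 2.0, 3.0]})
b = pd.DataFrame({"target_benzene": [3.0, 4.0, 5.0]})
blend = pd.concat([a, b]).groupby(level=0).mean()
print(blend)  # element-wise means: 2.0, 3.0, 4.0
# An equivalent and arguably more explicit way to write the same blend, as long as the indices align:
print((a + b) / 2)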
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/549/69549650.ipynb
null
null
[{"Id": 69549650, "ScriptId": 18524438, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6096701, "CreationDate": "08/01/2021 11:55:55", "VersionNumber": 17.0, "Title": "TPS july 2021 competetion", "EvaluationDate": "08/01/2021", "IsChange": false, "TotalLines": 53.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 53.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
null
null
null
null
false
0
603
3
603
603
69549332
<jupyter_start><jupyter_text>Geoffrey Hinton Fellowship, Hackathon 2 An e-commerce company wants to recommend products to its users. The company has collected only transaction data in the past. The training dataset has only 3 columns - user_id, Product bought and Order value of the product. Using this dataset, predict for all the users in the training dataset, the top 3 categories that the user might buy from. **Training Data** This file contains the detailed purchasing history for every user. It has order value and the category of the product. **Training Data Target** This file contains data for some users about the category of items they bought in future. **Test Data** This file contains the detailed purchasing history for some users. It has the order value and the category of the product. You have to predict the top 3 categories that the users with these user_ids will purchase from in the future. Kaggle dataset identifier: geoffrey-hinton-fellowship-hackathon-2 <jupyter_code>import pandas as pd df = pd.read_csv('geoffrey-hinton-fellowship-hackathon-2/Training Data.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 257407 entries, 0 to 257406 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 user_id 257407 non-null int64 1 aov 257407 non-null float64 2 category 257407 non-null object dtypes: float64(1), int64(1), object(1) memory usage: 5.9+ MB <jupyter_text>Examples: { "user_id": 37327, "aov": 29128, "category": "Phones" } { "user_id": 37327, "aov": 354, "category": "Fashion" } { "user_id": 37327, "aov": 460, "category": "Home Decor" } { "user_id": 37327, "aov": 761, "category": "Fashion" } <jupyter_code>import pandas as pd df = pd.read_csv('geoffrey-hinton-fellowship-hackathon-2/Test Data.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 172554 entries, 0 to 172553 Data columns (total 4 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 172554 non-null int64 1 user_id 172554 non-null int64 2 aov 172554 non-null float64 3 category 172554 non-null object dtypes: float64(1), int64(2), object(1) memory usage: 5.3+ MB <jupyter_text>Examples: { "Unnamed: 0": 0, "user_id": 43323, "aov": 19115, "category": "Phones" } { "Unnamed: 0": 1, "user_id": 43323, "aov": 29309, "category": "Phones" } { "Unnamed: 0": 2, "user_id": 43323, "aov": 15293, "category": "Phones" } { "Unnamed: 0": 3, "user_id": 43323, "aov": 23548, "category": "Phones" } <jupyter_code>import pandas as pd df = pd.read_csv('geoffrey-hinton-fellowship-hackathon-2/Training Data Target.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 13245 entries, 0 to 13244 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 user_id 13245 non-null int64 1 aov 13245 non-null int64 2 category 13245 non-null object dtypes: int64(2), object(1) memory usage: 310.6+ KB <jupyter_text>Examples: { "user_id": 13153, "aov": -9999, "category": "Phones" } { "user_id": 42853, "aov": -9999, "category": "Phones" } { "user_id": 30550, "aov": -9999, "category": "Phones" } { "user_id": 9797, "aov": -9999, "category": "Phones" } <jupyter_script># # Resources # * [ https://developers.google.com/machine-learning/recommendation/overview/types ] # * [ https://www.kaggle.com/ibtesama/getting-started-with-a-movie-recommendation-system ] # * [ https://www.kaggle.com/kanncaa1/recommendation-systems-tutorial ] # * [ https://www.featuretools.com/demos/ ] import 
os import gc import sys import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # preprocessing : from sklearn.preprocessing import MinMaxScaler, StandardScaler, scale from sklearn.preprocessing import LabelEncoder from sklearn.pipeline import Pipeline from sklearn.utils.class_weight import compute_class_weight from sklearn.cluster import KMeans # k fold from sklearn.model_selection import StratifiedKFold, train_test_split, KFold # metrics from sklearn.metrics import balanced_accuracy_score, brier_score_loss # deep learning # import tensorflow as tf # from tensorflow.keras import layers # from tensorflow.keras import Model # from tensorflow.keras import backend as K # training from catboost import CatBoostClassifier as cb # hyperparam optimization import optuna from optuna import Trial import warnings warnings.filterwarnings("ignore") # **Training Data** train = pd.read_csv("../input/geoffrey-hinton-fellowship-hackathon-2/Training Data.csv") target = pd.read_csv( "../input/geoffrey-hinton-fellowship-hackathon-2/Training Data Target.csv" ) test = pd.read_csv("../input/geoffrey-hinton-fellowship-hackathon-2/Test Data.csv") train.shape train = train[train["user_id"].isin(target["user_id"])] train.shape categories = train.category.unique() categories print("Number of Unique Users {}".format(train["user_id"].nunique())) def plot_count(df, col, title): plt.subplots(figsize=(16, 8)) sns.countplot(df[col]) plt.xticks(rotation=45) plt.title(title) plt.show() plot_count(train, "category", "training data categories") plot_count(train, "category", "training target categories") # **Creating Features** # K means # finding ideal k by elbow method: def find_ideal_k(m, df, cols): """find ideal k for clustering using elbw method""" X_temp = scale(df[cols].copy()) inertia = [] K = [i for i in range(2, m)] for k in K: km = KMeans(n_clusters=k) km.fit(X_temp) inertia.append(km.inertia_) plt.figure(figsize=(16, 8)) plt.plot(K, inertia) plt.xlabel("K") plt.ylabel("inertia") plt.show() del X_temp gc.collect() # category columns # find_ideal_k(40,X,categories) # find_ideal_k(40,X,aov_cols) # # **Feature Transformer** class feature_transformer: """make features out of data provided""" def __init__(self, test_run=False): self.test_run = test_run self.super_categories = [ "Education_Hobbies_work", "Electronics", "Home", "Personal", "Kids", ] def agg_features(self, df): agg_cols = ["median", "min", "max", "sum", "count", "mean", "std"] # train aov_features = pd.DataFrame( df.groupby("user_id").agg({"aov": agg_cols}) ).droplevel(0, axis=1) aov_features.reset_index(inplace=True) aov_features.rename({k: f"{k}_aov" for k in agg_cols}, axis=1, inplace=True) self.aov_cols = list({k: f"{k}_aov" for k in agg_cols}.values()) return aov_features def vectorize(self, df): """return a dataframe with the customer interaction with products""" df1 = pd.DataFrame() df1["user_id"] = df["user_id"].unique() categories = df["category"].unique() df1[categories] = 0 df1[[f"{cat}_spend" for cat in categories]] = 0 for user_id in df1.user_id: df_user = df[df["user_id"] == user_id] for ids, row in df_user.iterrows(): # number of times user has bought a particular category df1.loc[df1["user_id"] == user_id, row["category"]] += 1 # sum of money user has spend on that category df1.loc[df1["user_id"] == user_id, row["category"] + "_spend"] += row[ "aov" ] return df1 def avg_spend(self, row): """avg spend for each category""" categories = list(self.categories) categories.extend(list(self.super_categories)) for 
cat in categories: if row[cat] != 0: row[cat + "_avg" + "_spend"] = row[cat + "_spend"] / row[cat] else: row[cat + "_avg" + "_spend"] = 0 return row def sup_categ(self, df): """count of Items purchased from these super categories""" sup_categories = self.super_categories Education_Hobbies_work = [ "Back to School", "Painiting Supplies", "Laptops", "Books", "Ereaders", "Board Games", "Gaming", ] Electronics = ["Phones", "TVs", "Gaming", "Laptops", "Ereaders"] Personal = ["Fitness", "Fashion", "Consumer Durables", "Beauty Products"] Home = [ "Home Decor", "Consumer Durables", "TVs", "Groceries", "Kitchen cleaning Supplies", "Pet Supplies", ] Kids = ["Board Games", "Back to School", "Toys", "Gaming"] for cat in sup_categories: # count in super cat df[f"{cat}"] = df[eval(cat)].sum(axis=1) # amount spend in each super cat df[f"{cat}_spend"] = df[[str(i) + "_spend" for i in eval(cat)]].sum(axis=1) return df def cluster_features(self, X, X_test, num_init=50): categories = [f"{c}_spend" for c in self.categories] aov_cols = self.aov_cols sup_categories = [f"{c}_spend" for c in self.super_categories] # categories km_pipe = KMeans(n_clusters=15, n_init=num_init, init="k-means++") X["Cluster_categories"] = km_pipe.fit_predict(X[categories]) X_test["Cluster_categories"] = km_pipe.predict(X_test[categories]) # super Categories km_pipe = KMeans(n_clusters=10, n_init=num_init, init="k-means++") X["Cluster_Sup_cat"] = km_pipe.fit_predict(X[sup_categories]) X_test["Cluster_Sup_cat"] = km_pipe.predict(X_test[sup_categories]) # aov cols km_pipe = Pipeline( [ ("scale", StandardScaler()), ("kmeans", KMeans(n_clusters=10, n_init=num_init, init="k-means++")), ] ) X["Cluster_aov"] = km_pipe.fit_predict(X[aov_cols]) X_test["Cluster_aov"] = km_pipe.predict(X_test[aov_cols]) return X, X_test def transform(self, train_df, test_df): self.categories = train_df["category"].unique() if self.test_run: train_df = train_df.sample(1000) test_df = test_df.sample(1000) # train features X = self.vectorize(train_df) X = self.sup_categ(X) X = X.merge(self.agg_features(train_df), on="user_id", how="inner") X = X.apply(lambda x: self.avg_spend(x), axis=1) X = X.fillna(0) # test features X_test = self.vectorize(test_df) X_test = self.sup_categ(X_test) X_test = X_test.merge(self.agg_features(test_df), on="user_id", how="inner") X_test = X_test.apply(lambda x: self.avg_spend(x), axis=1) X_test = X_test.fillna(0) # cluster X, X_test = self.cluster_features(X=X, X_test=X_test) return X, X_test # make features feature_transformer = feature_transformer(test_run=False) X, X_test = feature_transformer.transform(train_df=train, test_df=test) # merge target X = X.merge(target[["user_id", "category"]], on="user_id", how="inner") # reset index X.reset_index(inplace=True, drop=True) X_test.reset_index(inplace=True, drop=True) X.shape # encoding target category label_enc = LabelEncoder() X["category"] = label_enc.fit_transform(X["category"]) classes = label_enc.classes_ # num_classes num_classes = len(classes) num_classes # train data y = X.pop("category") tr_class = y.unique() # cal class weights if required weights = compute_class_weight(class_weight="balanced", classes=tr_class, y=y) class_weights = dict(zip(tr_class, weights)) assert set(X.columns) == set(X_test.columns), "columns donot match" # user ids train_user_ids = X.pop("user_id") test_user_ids = X_test.pop("user_id") # columns with categorical features categorical_columns = ["Cluster_categories", "Cluster_aov", "Cluster_Sup_cat"] X["Cluster_categories"].plot(kind="hist") # # **OPTUNA 
Hyperparam optimization** def objective(trial: Trial): # splitting training data x_train, x_test, y_train, y_test = train_test_split( X, y, random_state=7, train_size=0.7, stratify=y ) # hyperparam_grid params = { "verbose": 0, "loss_function": "MultiClass", "classes_count": len(classes), "depth": trial.suggest_int("depth", 4, 8), "learning_rate": trial.suggest_loguniform("learning_rate", 1e-3, 1e-1), "l2_leaf_reg": trial.suggest_loguniform("l2_leaf_reg", 1e-2, 10.0), "random_strength": trial.suggest_uniform("random_strength", 1e-2, 0.3), "max_bin": trial.suggest_int("max_bin", 64, 254), # 'grow_policy' :trial.suggest_categorical('grow_policy', # ['SymmetricTree','Depthwise','Lossguide']), "iterations": trial.suggest_int("iterations", 1000, 2000), # 'max_leaves' :trial.suggest_int('max_leaves',2,64), "colsample_bylevel": trial.suggest_float("colsample_bylevel", 0.1, 0.6), # "boosting_type": trial.suggest_categorical("boosting_type", ["Ordered", "Plain"]), "bootstrap_type": "MVS", # trial.suggest_categorical("bootstrap_type", # ["Bayesian", "MVS",'Bernoulli']), "eval_metric": "MultiClass", } try: model = cb(**params) model.fit( x_train, y_train, eval_set=[(x_test, y_test)], verbose=0, cat_features=categorical_columns, early_stopping_rounds=300, ) # class_weights=cw) preds = model.predict(x_test) acc = balanced_accuracy_score(y_test, preds) return acc except Exception as e: print(e) return None def get_best_params(time_out=9000): sampler = optuna.samplers.TPESampler( seed=7 ) # Make the sampler behave in a deterministic way. study = optuna.create_study(direction="maximize", sampler=sampler) study.optimize(objective, n_trials=300, timeout=time_out) print("Number of finished trials: {}".format(len(study.trials))) return study.best_trial.params # best_params=get_best_params() # 29/7/21 best_params = { "verbose": 0, "loss_function": "MultiClass", "classes_count": len(classes), "bootstrap_type": "MVS", "depth": 4, "learning_rate": 0.06365747563313634, "l2_leaf_reg": 0.14164990508678563, "random_strength": 0.18177681941882445, "max_bin": 254, "iterations": 3000, # 1505 "colsample_bylevel": 0.4732653286911665, } # 26/7/21 # best_params={ # 'verbose' : 0, # 'loss_function' :'MultiClass', # 'classes_count' : len(classes), # "bootstrap_type": 'MVS', # 'depth': 5, # 'learning_rate': 0.09962164243695339, # 'l2_leaf_reg': 1.2575960096355996, # 'random_strength': 0.07176128132724813, # 'max_bin': 220, # 'iterations': 1206, # 'colsample_bylevel': 0.5598653634090404} # 25/7/21 # best_params={ # 'verbose' : 0, # 'loss_function' :'MultiClass', # 'classes_count' : len(classes), # "bootstrap_type": 'MVS', # 'depth': 4, # 'learning_rate': 0.07240696360883225, # 'l2_leaf_reg': 0.37748668450031264, # 'random_strength': 0.14114940406399043, # 'max_bin': 170, 'iterations': 2148, # 'colsample_bylevel': 0.41308547541216933} # # Model Fitting and Prediction def k_fold_predict(k, params=best_params): skf = StratifiedKFold(n_splits=k) mean_preds = np.zeros(shape=(X_test.shape[0], len(classes))) train_check = np.zeros(shape=(X.shape[0], len(classes))) for train_idx, val_idx in skf.split(X, y): x_t, x_v = X.iloc[train_idx], X.iloc[val_idx] y_t, y_v = y.iloc[train_idx], y.iloc[val_idx] model = cb(**params) model.fit(x_t, y_t, cat_features=categorical_columns) print( "Validation score {}".format( balanced_accuracy_score(y_v, model.predict(x_v)) ) ) # test predictions mean_preds += model.predict(X_test, prediction_type="Probability") # training preds train_check += model.predict(X, prediction_type="Probability") mean_preds = 
mean_preds / k train_check = train_check / k return mean_preds, train_check preds, train_ch = k_fold_predict(30) preds = pd.DataFrame(preds) train_preds = pd.DataFrame(train_ch) preds.rename(columns={i: classes[i] for i in range(len(classes))}, inplace=True) preds.head() # mean predictions for each class np.mean(preds, axis=0) # mean predictions for each class(training_data) pd.DataFrame( {"predicted_ratio": np.mean(train_preds, axis=0), "actual_ratio": y.value_counts(1)} ) # **Predicting Top Three categories based on predicted Probablities** def get_top_cat(row, classes): """get the top 3 predicted categories""" prob, top_3 = zip(*sorted(zip(row.values, classes), reverse=True)[:3]) top_3 = ", ".join(top_3) return top_3 get_top_cat(preds.iloc[0], classes) get_top_cat(preds.iloc[180], classes) df_preds = pd.DataFrame() df_preds["user_id"] = test_user_ids df_preds["pred3"] = 0 for i in range(len(preds)): df_preds.loc[i, "pred3"] = get_top_cat(row=preds.iloc[i, :], classes=classes) df_preds.head() df_preds.to_csv("submission.csv", index=False) # def mean_relevance_rank(top_category,predicted_categories): # '''get a mean relevance rank for a given prediction.''' # mrr=float(1/(1+i) for i in range(len(predicted_categories.split(','))) \ # if predicted_categories.split(',')[i]==top_category else 0) # return mrr # def precision_rank(top_category,predicted_categories): # '''return a precision rank for given prediction''' # pr=1 if top_catgory in predicted_categories else 0 # return pr
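# #### One possible reading of the commented-out metric helpers (interpretation, not the author's code)
# The two commented-out functions above sketch a reciprocal-rank and a hit check for the "cat1, cat2, cat3"
# strings produced by get_top_cat, but they do not parse as written. Below is a self-contained, hedged
# version of what they appear to intend; names and signatures are kept close to the originals.
def mean_relevance_rank(top_category, predicted_categories):
    """1 / (1 + position) of the true category in the comma-separated top-3 string, else 0."""
    preds = [c.strip() for c in predicted_categories.split(",")]
    for i, cat in enumerate(preds):
        if cat == top_category:
            return 1.0 / (1 + i)
    return 0.0

def precision_rank(top_category, predicted_categories):
    """1 if the true category appears anywhere in the predicted top-3 string, else 0."""
    preds = [c.strip() for c in predicted_categories.split(",")]
    return int(top_category in preds)

print(mean_relevance_rank("Phones", "Fashion, Phones, Books"))  # 0.5
print(precision_rank("Home Decor", "Fashion, Phones, Books"))   # 0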
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/549/69549332.ipynb
geoffrey-hinton-fellowship-hackathon-2
rupeshrk3
[{"Id": 69549332, "ScriptId": 18294422, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5466652, "CreationDate": "08/01/2021 11:51:32", "VersionNumber": 2.0, "Title": "UnivAI GHF1", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 513.0, "LinesInsertedFromPrevious": 407.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 106.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
[{"Id": 92863888, "KernelVersionId": 69549332, "SourceDatasetVersionId": 2378960}]
[{"Id": 2378960, "DatasetId": 1437628, "DatasourceVersionId": 2420855, "CreatorUserId": 4932840, "LicenseName": "Unknown", "CreationDate": "06/29/2021 07:12:19", "VersionNumber": 1.0, "Title": "Geoffrey Hinton Fellowship, Hackathon 2", "Slug": "geoffrey-hinton-fellowship-hackathon-2", "Subtitle": NaN, "Description": "An e-commerce company wants to recommend products to its users. The company has collected only transaction data in the past. The training dataset has only 3 columns - user_id, Product bought and Order value of the product. Using this dataset, predict for all the users in the training dataset, the top 3 categories that the user might buy from.\n\n**Training Data**\nThis file contains the detailed purchasing history for every user. It has order value and the category of the product.\n\n**Training Data Target**\nThis file contains data for some users about the category of items they bought in future.\n\n**Test Data**\nThis file contains the detailed purchasing history for some users. It has the order value and the category of the product. You have to predict the top 3 categories that the users with these user_ids will purchase from in the future.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1437628, "CreatorUserId": 4932840, "OwnerUserId": 4932840.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2378960.0, "CurrentDatasourceVersionId": 2420855.0, "ForumId": 1457099, "Type": 2, "CreationDate": "06/29/2021 07:12:19", "LastActivityDate": "06/29/2021", "TotalViews": 991, "TotalDownloads": 8, "TotalVotes": 5, "TotalKernels": 1}]
[{"Id": 4932840, "UserName": "rupeshrk3", "DisplayName": "Rupesh Kumar", "RegisterDate": "04/22/2020", "PerformanceTier": 1}]
[{"geoffrey-hinton-fellowship-hackathon-2/Training Data.csv": {"column_names": "[\"user_id\", \"aov\", \"category\"]", "column_data_types": "{\"user_id\": \"int64\", \"aov\": \"float64\", \"category\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 257407 entries, 0 to 257406\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 user_id 257407 non-null int64 \n 1 aov 257407 non-null float64\n 2 category 257407 non-null object \ndtypes: float64(1), int64(1), object(1)\nmemory usage: 5.9+ MB\n", "summary": "{\"user_id\": {\"count\": 257407.0, \"mean\": 25040.004083028045, \"std\": 14454.553687371295, \"min\": 1.0, \"25%\": 12569.0, \"50%\": 25090.0, \"75%\": 37542.0, \"max\": 50082.0}, \"aov\": {\"count\": 257407.0, \"mean\": 6808.788032182497, \"std\": 10547.69071650145, \"min\": 10.0, \"25%\": 545.0, \"50%\": 845.0, \"75%\": 11913.5, \"max\": 109510.0}}", "examples": "{\"user_id\":{\"0\":37327,\"1\":37327,\"2\":37327,\"3\":37327},\"aov\":{\"0\":29128.0,\"1\":354.0,\"2\":460.0,\"3\":761.0},\"category\":{\"0\":\"Phones\",\"1\":\"Fashion\",\"2\":\"Home Decor\",\"3\":\"Fashion\"}}"}}, {"geoffrey-hinton-fellowship-hackathon-2/Test Data.csv": {"column_names": "[\"Unnamed: 0\", \"user_id\", \"aov\", \"category\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"user_id\": \"int64\", \"aov\": \"float64\", \"category\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 172554 entries, 0 to 172553\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 172554 non-null int64 \n 1 user_id 172554 non-null int64 \n 2 aov 172554 non-null float64\n 3 category 172554 non-null object \ndtypes: float64(1), int64(2), object(1)\nmemory usage: 5.3+ MB\n", "summary": "{\"Unnamed: 0\": {\"count\": 172554.0, \"mean\": 43138.10561911054, \"std\": 24906.27969216105, \"min\": 0.0, \"25%\": 21569.0, \"50%\": 43138.0, \"75%\": 64707.0, \"max\": 86411.0}, \"user_id\": {\"count\": 172554.0, \"mean\": 24926.601979670133, \"std\": 14381.095224793373, \"min\": 2.0, \"25%\": 12442.0, \"50%\": 24816.0, \"75%\": 37449.0, \"max\": 50089.0}, \"aov\": {\"count\": 172554.0, \"mean\": 6851.3059969632695, \"std\": 10525.429006301467, \"min\": 10.0, \"25%\": 545.0, \"50%\": 849.0, \"75%\": 12048.0, \"max\": 105861.0}}", "examples": "{\"Unnamed: 0\":{\"0\":0,\"1\":1,\"2\":2,\"3\":3},\"user_id\":{\"0\":43323,\"1\":43323,\"2\":43323,\"3\":43323},\"aov\":{\"0\":19115.0,\"1\":29309.0,\"2\":15293.0,\"3\":23548.0},\"category\":{\"0\":\"Phones\",\"1\":\"Phones\",\"2\":\"Phones\",\"3\":\"Phones\"}}"}}, {"geoffrey-hinton-fellowship-hackathon-2/Training Data Target.csv": {"column_names": "[\"user_id\", \"aov\", \"category\"]", "column_data_types": "{\"user_id\": \"int64\", \"aov\": \"int64\", \"category\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 13245 entries, 0 to 13244\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 user_id 13245 non-null int64 \n 1 aov 13245 non-null int64 \n 2 category 13245 non-null object\ndtypes: int64(2), object(1)\nmemory usage: 310.6+ KB\n", "summary": "{\"user_id\": {\"count\": 13245.0, \"mean\": 25096.183540958853, \"std\": 14519.917378378328, \"min\": 1.0, \"25%\": 12549.0, \"50%\": 25062.0, \"75%\": 37708.0, \"max\": 50079.0}, \"aov\": {\"count\": 13245.0, \"mean\": -9999.0, \"std\": 0.0, \"min\": -9999.0, \"25%\": -9999.0, \"50%\": -9999.0, \"75%\": -9999.0, 
\"max\": -9999.0}}", "examples": "{\"user_id\":{\"0\":13153,\"1\":42853,\"2\":30550,\"3\":9797},\"aov\":{\"0\":-9999,\"1\":-9999,\"2\":-9999,\"3\":-9999},\"category\":{\"0\":\"Phones\",\"1\":\"Phones\",\"2\":\"Phones\",\"3\":\"Phones\"}}"}}]
true
3
<start_data_description><data_path>geoffrey-hinton-fellowship-hackathon-2/Training Data.csv: <column_names> ['user_id', 'aov', 'category'] <column_types> {'user_id': 'int64', 'aov': 'float64', 'category': 'object'} <dataframe_Summary> {'user_id': {'count': 257407.0, 'mean': 25040.004083028045, 'std': 14454.553687371295, 'min': 1.0, '25%': 12569.0, '50%': 25090.0, '75%': 37542.0, 'max': 50082.0}, 'aov': {'count': 257407.0, 'mean': 6808.788032182497, 'std': 10547.69071650145, 'min': 10.0, '25%': 545.0, '50%': 845.0, '75%': 11913.5, 'max': 109510.0}} <dataframe_info> RangeIndex: 257407 entries, 0 to 257406 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 user_id 257407 non-null int64 1 aov 257407 non-null float64 2 category 257407 non-null object dtypes: float64(1), int64(1), object(1) memory usage: 5.9+ MB <some_examples> {'user_id': {'0': 37327, '1': 37327, '2': 37327, '3': 37327}, 'aov': {'0': 29128.0, '1': 354.0, '2': 460.0, '3': 761.0}, 'category': {'0': 'Phones', '1': 'Fashion', '2': 'Home Decor', '3': 'Fashion'}} <end_description> <start_data_description><data_path>geoffrey-hinton-fellowship-hackathon-2/Test Data.csv: <column_names> ['Unnamed: 0', 'user_id', 'aov', 'category'] <column_types> {'Unnamed: 0': 'int64', 'user_id': 'int64', 'aov': 'float64', 'category': 'object'} <dataframe_Summary> {'Unnamed: 0': {'count': 172554.0, 'mean': 43138.10561911054, 'std': 24906.27969216105, 'min': 0.0, '25%': 21569.0, '50%': 43138.0, '75%': 64707.0, 'max': 86411.0}, 'user_id': {'count': 172554.0, 'mean': 24926.601979670133, 'std': 14381.095224793373, 'min': 2.0, '25%': 12442.0, '50%': 24816.0, '75%': 37449.0, 'max': 50089.0}, 'aov': {'count': 172554.0, 'mean': 6851.3059969632695, 'std': 10525.429006301467, 'min': 10.0, '25%': 545.0, '50%': 849.0, '75%': 12048.0, 'max': 105861.0}} <dataframe_info> RangeIndex: 172554 entries, 0 to 172553 Data columns (total 4 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Unnamed: 0 172554 non-null int64 1 user_id 172554 non-null int64 2 aov 172554 non-null float64 3 category 172554 non-null object dtypes: float64(1), int64(2), object(1) memory usage: 5.3+ MB <some_examples> {'Unnamed: 0': {'0': 0, '1': 1, '2': 2, '3': 3}, 'user_id': {'0': 43323, '1': 43323, '2': 43323, '3': 43323}, 'aov': {'0': 19115.0, '1': 29309.0, '2': 15293.0, '3': 23548.0}, 'category': {'0': 'Phones', '1': 'Phones', '2': 'Phones', '3': 'Phones'}} <end_description> <start_data_description><data_path>geoffrey-hinton-fellowship-hackathon-2/Training Data Target.csv: <column_names> ['user_id', 'aov', 'category'] <column_types> {'user_id': 'int64', 'aov': 'int64', 'category': 'object'} <dataframe_Summary> {'user_id': {'count': 13245.0, 'mean': 25096.183540958853, 'std': 14519.917378378328, 'min': 1.0, '25%': 12549.0, '50%': 25062.0, '75%': 37708.0, 'max': 50079.0}, 'aov': {'count': 13245.0, 'mean': -9999.0, 'std': 0.0, 'min': -9999.0, '25%': -9999.0, '50%': -9999.0, '75%': -9999.0, 'max': -9999.0}} <dataframe_info> RangeIndex: 13245 entries, 0 to 13244 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 user_id 13245 non-null int64 1 aov 13245 non-null int64 2 category 13245 non-null object dtypes: int64(2), object(1) memory usage: 310.6+ KB <some_examples> {'user_id': {'0': 13153, '1': 42853, '2': 30550, '3': 9797}, 'aov': {'0': -9999, '1': -9999, '2': -9999, '3': -9999}, 'category': {'0': 'Phones', '1': 'Phones', '2': 'Phones', '3': 'Phones'}} <end_description>
4,717
2
6,020
4,717
69003408
<jupyter_start><jupyter_text>[DEPRECATED] keras-applications DEPRECATED, USE: https://www.kaggle.com/datasets/xhlulu/keras-applications Kaggle dataset identifier: kerasapplications <jupyter_script>import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import os from PIL import Image from tqdm.auto import tqdm import pydicom from pydicom.pixel_data_handlers.util import apply_voi_lut import efficientnet.tfkeras as efn import tensorflow as tf # [PYDICOM Condo Helper Ref.](https://www.kaggle.com/awsaf49/pydicom-conda-helper/) # # Read Submission CSV df = pd.read_csv("../input/siim-covid19-detection/sample_submission.csv") if df.shape[0] == 2477: fast_sub = True fast_df = pd.DataFrame( ( [ ["00086460a852_study", "negative 1 0 0 1 1"], ["000c9c05fd14_study", "negative 1 0 0 1 1"], ["65761e66de9f_image", "none 1 0 0 1 1"], ["51759b5579bc_image", "none 1 0 0 1 1"], ] ), columns=["id", "PredictionString"], ) else: fast_sub = False print(fast_sub) # # Conver .DCM File to png/jpg def read_xray(path, voi_lut=True, fix_monochrome=True): # Original from: https://www.kaggle.com/raddar/convert-dicom-to-np-array-the-correct-way dicom = pydicom.read_file(path) # VOI LUT (if available by DICOM device) is used to transform raw DICOM data to # "human-friendly" view if voi_lut: data = apply_voi_lut(dicom.pixel_array, dicom) else: data = dicom.pixel_array # depending on this value, X-ray may look inverted - fix that: if fix_monochrome and dicom.PhotometricInterpretation == "MONOCHROME1": data = np.amax(data) - data data = data - np.min(data) data = data / np.max(data) data = (data * 255).astype(np.uint8) return data def resize(array, size, keep_ratio=False, resample=Image.LANCZOS): # Original from: https://www.kaggle.com/xhlulu/vinbigdata-process-and-resize-to-image im = Image.fromarray(array) if keep_ratio: im.thumbnail((size, size), resample) else: im = im.resize((size, size), resample) return im # # Preprocess -'/kaggle/working' # * Convert/Resize/Store Study ID - Images # * Convert/Resize/Store Image ID - Images split = "test" save_dir = f"/kaggle/working/{split}/" os.makedirs(save_dir, exist_ok=True) save_dir = f"/kaggle/working/{split}/study/" os.makedirs(save_dir, exist_ok=True) if fast_sub: xray = read_xray( "../input/siim-covid19-detection/train/00086460a852/9e8302230c91/65761e66de9f.dcm" ) im = resize(xray, size=600) study = "00086460a852" + "_study.png" im.save(os.path.join(save_dir, study)) xray = read_xray( "../input/siim-covid19-detection/train/000c9c05fd14/e555410bd2cd/51759b5579bc.dcm" ) im = resize(xray, size=600) study = "000c9c05fd14" + "_study.png" im.save(os.path.join(save_dir, study)) else: for dirname, _, filenames in tqdm( os.walk(f"../input/siim-covid19-detection/{split}") ): for file in filenames: # set keep_ratio=True to have original aspect ratio xray = read_xray(os.path.join(dirname, file)) im = resize(xray, size=600) study = dirname.split("/")[-2] + "_study.png" im.save(os.path.join(save_dir, study)) image_id = [] dim0 = [] dim1 = [] splits = [] save_dir = f"/kaggle/working/{split}/image/" os.makedirs(save_dir, exist_ok=True) if fast_sub: xray = read_xray( "../input/siim-covid19-detection/train/00086460a852/9e8302230c91/65761e66de9f.dcm" ) im = resize(xray, size=512) im.save(os.path.join(save_dir, "65761e66de9f_image.png")) image_id.append("65761e66de9f.dcm".replace(".dcm", "")) dim0.append(xray.shape[0]) dim1.append(xray.shape[1]) splits.append(split) xray = read_xray( 
"../input/siim-covid19-detection/train/000c9c05fd14/e555410bd2cd/51759b5579bc.dcm" ) im = resize(xray, size=512) im.save(os.path.join(save_dir, "51759b5579bc_image.png")) image_id.append("51759b5579bc.dcm".replace(".dcm", "")) dim0.append(xray.shape[0]) dim1.append(xray.shape[1]) splits.append(split) else: for dirname, _, filenames in tqdm( os.walk(f"../input/siim-covid19-detection/{split}") ): for file in filenames: # set keep_ratio=True to have original aspect ratio xray = read_xray(os.path.join(dirname, file)) im = resize(xray, size=512) im.save(os.path.join(save_dir, file.replace(".dcm", "_image.png"))) image_id.append(file.replace(".dcm", "")) dim0.append(xray.shape[0]) dim1.append(xray.shape[1]) splits.append(split) meta = pd.DataFrame.from_dict( {"image_id": image_id, "dim0": dim0, "dim1": dim1, "split": splits} ) # # Predict Study - Use Case if fast_sub: df = fast_df.copy() else: df = pd.read_csv("../input/siim-covid19-detection/sample_submission.csv") id_laststr_list = [] for i in range(df.shape[0]): id_laststr_list.append(df.loc[i, "id"][-1]) df["id_last_str"] = id_laststr_list study_len = df[df["id_last_str"] == "y"].shape[0] # # Import Keras efficientnet model def auto_select_accelerator(): try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) print("Running on TPU:", tpu.master()) except ValueError: strategy = tf.distribute.get_strategy() print(f"Running on {strategy.num_replicas_in_sync} replicas") return strategy def build_decoder(with_labels=True, target_size=(300, 300), ext="jpg"): def decode(path): file_bytes = tf.io.read_file(path) if ext == "png": img = tf.image.decode_png(file_bytes, channels=3) elif ext in ["jpg", "jpeg"]: img = tf.image.decode_jpeg(file_bytes, channels=3) else: raise ValueError("Image extension not supported") img = tf.cast(img, tf.float32) / 255.0 img = tf.image.resize(img, target_size) return img def decode_with_labels(path, label): return decode(path), label return decode_with_labels if with_labels else decode def build_augmenter(with_labels=True): def augment(img): img = tf.image.random_flip_left_right(img) img = tf.image.random_flip_up_down(img) return img def augment_with_labels(img, label): return augment(img), label return augment_with_labels if with_labels else augment def build_dataset( paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir="", ): if cache_dir != "" and cache is True: os.makedirs(cache_dir, exist_ok=True) if decode_fn is None: decode_fn = build_decoder(labels is not None) if augment_fn is None: augment_fn = build_augmenter(labels is not None) AUTO = tf.data.experimental.AUTOTUNE slices = paths if labels is None else (paths, labels) dset = tf.data.Dataset.from_tensor_slices(slices) dset = dset.map(decode_fn, num_parallel_calls=AUTO) dset = dset.cache(cache_dir) if cache else dset dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset dset = dset.repeat() if repeat else dset dset = dset.shuffle(shuffle) if shuffle else dset dset = dset.batch(bsize).prefetch(AUTO) return dset # COMPETITION_NAME = "siim-cov19-test-img512-study-600" strategy = auto_select_accelerator() BATCH_SIZE = strategy.num_replicas_in_sync * 16 IMSIZE = (224, 240, 260, 300, 380, 456, 528, 600, 512) # load_dir = f"/kaggle/input/{COMPETITION_NAME}/" if fast_sub: sub_df = fast_df.copy() else: sub_df = 
pd.read_csv("../input/siim-covid19-detection/sample_submission.csv") sub_df = sub_df[:study_len] test_paths = f"/kaggle/working/{split}/study/" + sub_df["id"] + ".png" sub_df["negative"] = 0 sub_df["typical"] = 0 sub_df["indeterminate"] = 0 sub_df["atypical"] = 0 label_cols = sub_df.columns[2:] test_decoder = build_decoder( with_labels=False, target_size=(IMSIZE[7], IMSIZE[7]), ext="png" ) dtest = build_dataset( test_paths, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder, ) with strategy.scope(): models = [] models0 = tf.keras.models.load_model( "../input/siim-covid19-efnb7-train-study/model0.h5" ) models1 = tf.keras.models.load_model( "../input/siim-covid19-efnb7-train-study/model1.h5" ) models2 = tf.keras.models.load_model( "../input/siim-covid19-efnb7-train-study/model2.h5" ) models3 = tf.keras.models.load_model( "../input/siim-covid19-efnb7-train-study/model3.h5" ) models4 = tf.keras.models.load_model( "../input/siim-covid19-efnb7-train-study/model4.h5" ) models.append(models0) models.append(models1) models.append(models2) models.append(models3) models.append(models4) sub_df[label_cols] = sum([model.predict(dtest, verbose=1) for model in models]) / len( models ) sub_df.columns = [ "id", "PredictionString1", "negative", "typical", "indeterminate", "atypical", ] df = pd.merge(df, sub_df, on="id", how="left") for i in range(study_len): negative = df.loc[i, "negative"] typical = df.loc[i, "typical"] indeterminate = df.loc[i, "indeterminate"] atypical = df.loc[i, "atypical"] df.loc[ i, "PredictionString" ] = f"negative {negative} 0 0 1 1 typical {typical} 0 0 1 1 indeterminate {indeterminate} 0 0 1 1 atypical {atypical} 0 0 1 1" df_study = df[["id", "PredictionString"]] print(df_study.head(10)) # # Run EfficientNet on Two Classes if fast_sub: sub_df = fast_df.copy() else: sub_df = pd.read_csv("../input/siim-covid19-detection/sample_submission.csv") sub_df = sub_df[study_len:] test_paths = f"/kaggle/working/{split}/image/" + sub_df["id"] + ".png" sub_df["none"] = 0 label_cols = sub_df.columns[2] test_decoder = build_decoder( with_labels=False, target_size=(IMSIZE[8], IMSIZE[8]), ext="png" ) dtest = build_dataset( test_paths, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder, ) with strategy.scope(): models = [] models0 = tf.keras.models.load_model( "../input/siim-covid19-efnb7-train-fold0-5-2class/model0.h5" ) models1 = tf.keras.models.load_model( "../input/siim-covid19-efnb7-train-fold0-5-2class/model1.h5" ) models2 = tf.keras.models.load_model( "../input/siim-covid19-efnb7-train-fold0-5-2class/model2.h5" ) models3 = tf.keras.models.load_model( "../input/siim-covid19-efnb7-train-fold0-5-2class/model3.h5" ) models4 = tf.keras.models.load_model( "../input/siim-covid19-efnb7-train-fold0-5-2class/model4.h5" ) models.append(models0) models.append(models1) models.append(models2) models.append(models3) models.append(models4) sub_df[label_cols] = sum([model.predict(dtest, verbose=1) for model in models]) / len( models ) df_2class = sub_df.reset_index(drop=True) del models del models0, models1, models2, models3, models4 print(df_2class.head(10)) from numba import cuda import torch cuda.select_device(0) cuda.close() cuda.select_device(0)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003408.ipynb
kerasapplications
xhlulu
[{"Id": 69003408, "ScriptId": 18829036, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6910760, "CreationDate": "07/25/2021 17:52:59", "VersionNumber": 1.0, "Title": "Submission", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 354.0, "LinesInsertedFromPrevious": 354.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 91688833, "KernelVersionId": 69003408, "SourceDatasetVersionId": 1666454}, {"Id": 91688834, "KernelVersionId": 69003408, "SourceDatasetVersionId": 2406834}]
[{"Id": 1666454, "DatasetId": 986800, "DatasourceVersionId": 1702809, "CreatorUserId": 2352583, "LicenseName": "Unknown", "CreationDate": "11/21/2020 05:57:19", "VersionNumber": 1.0, "Title": "[DEPRECATED] keras-applications", "Slug": "kerasapplications", "Subtitle": "DEPRECATED, USE: https://www.kaggle.com/datasets/xhlulu/keras-applications", "Description": "DEPRECATED, USE: https://www.kaggle.com/datasets/xhlulu/keras-applications", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 986800, "CreatorUserId": 2352583, "OwnerUserId": 2352583.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1666454.0, "CurrentDatasourceVersionId": 1702809.0, "ForumId": 1003340, "Type": 2, "CreationDate": "11/21/2020 05:57:19", "LastActivityDate": "11/21/2020", "TotalViews": 3409, "TotalDownloads": 118, "TotalVotes": 17, "TotalKernels": 124}]
[{"Id": 2352583, "UserName": "xhlulu", "DisplayName": "xhlulu", "RegisterDate": "10/12/2018", "PerformanceTier": 4}]
false
1
4,106
0
4,151
4,106
69003256
# Importing necessary packages import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.linear_model import Lasso from sklearn.feature_selection import SelectFromModel from sklearn.ensemble import RandomForestRegressor as RFR from sklearn.metrics import mean_absolute_error, mean_squared_error # Importing necessary dataset resid_data = pd.read_csv("train.csv") resid_data resid_data.info() pd.set_option("display.max_rows", None) resid_data.isnull().sum() na_features = [ features for features in resid_data.columns if resid_data[features].isnull().any() == True ] na_features # Now we will plot the features wrt Median Sales price for 0 and 1 values in features for feature in na_features: data = resid_data.copy() data[feature] = np.where(data[feature].isnull(), 1, 0) data.groupby(feature)["SalePrice"].median().plot.bar(color=["red", "black"]) print(data.groupby(feature)["SalePrice"].median()) plt.show() # Extracting all the numerical features from the dataset num_features = [ feature for feature in resid_data.columns if resid_data[feature].dtypes != "O" ] print("Number of Numerical Features:", len(num_features)) resid_data[num_features] # Extracting datatime features from the dataset dt_year_feature = [ feature for feature in num_features if "Year" in feature or "Yr" in feature ] print("Number of Dt_year_Features:", len(dt_year_feature)) resid_data[dt_year_feature] # Analyzing yearly features wrt SalePrice for feature in dt_year_feature: resid_data_copy = resid_data.copy() resid_data_copy.groupby(feature)["SalePrice"].median().plot() plt.show() # Numerical Feature we have two types of variables continuos and discrete,we will extract them indiviually. 
# Extracting Discrete Feature disc_feature = [ feature for feature in num_features if len(resid_data[feature].unique()) < 45 and feature not in dt_year_feature + ["Id"] ] print("Number of discrete feature:", len(disc_feature)) resid_data[disc_feature] # Bar graph between discrete feature and SalePrice for feature in disc_feature: resid_data_copy = resid_data.copy() resid_data_copy.groupby(feature)["SalePrice"].median().plot.bar( color=["pink", "indigo", "red", "blue", "green", "black", "blue", "maroon"] ) plt.ylabel("SalePrice") plt.show() # Extracting Continuous Features cont_features = [ feature for feature in num_features if feature not in disc_feature + dt_year_feature + ["Id"] ] print("Number of Continuous Feature:", len(cont_features)) resid_data[cont_features] # Data analysis for continuous feature with the help of histograms for feature in cont_features: resid_data_copy = resid_data.copy() resid_data_copy[feature].hist(bins=15) plt.ylabel("count") plt.xlabel(feature) plt.show() for feature in cont_features: data = resid_data.copy() data[feature] = np.log1p(data[feature]) data["SalePrice"] = np.log1p(data["SalePrice"]) if feature == "SalePrice": pass else: plt.scatter(data[feature], data["SalePrice"]) plt.xlabel(feature) plt.ylabel("SalePrice") plt.show() # Gausian distribution for continous features for feature in cont_features: data = resid_data.copy() data[feature] = np.log1p(data[feature]) data["SalePrice"] = np.log1p(data["SalePrice"]) if feature == "SalePrice": pass else: plt.scatter(data[feature], data["SalePrice"]) plt.xlabel(feature) plt.ylabel("SalePrice") plt.show() # Categorical variable cat_features = [ feature for feature in resid_data.columns if resid_data[feature].dtypes == "O" ] print("Number of categorical features:", len(cat_features)) resid_data[cat_features] # Bar graphs in between categorical data columns and sales pricing for feature in cat_features: df_data = resid_data.copy() df_data.groupby(feature)["SalePrice"].median().plot.bar( color=[ "red", "black", "blue", "pink", "purple", "turquoise", "brown", "darkorange", ] ) plt.show() # Finding out % of missing values in categorical features pct_missingvalues = ( resid_data[ [feature for feature in resid_data.columns if resid_data[feature].dtypes == "O"] ] .isnull() .sum() / len(resid_data) * 100 ) pct_missingvalues # Removing categorical features where missing values is more than 50 and dropping the columns missing_features = pct_missingvalues[pct_missingvalues > 50] missing_features for feature in missing_features.index: resid_data.drop([feature], axis=1, inplace=True) na_features = [ feature for feature in resid_data.columns if resid_data[feature].isnull().sum().any() == True ] na_features num_features = [feature for feature in na_features if resid_data[feature].dtypes != "O"] resid_data[num_features].isnull().sum() / len(resid_data) * 100 # Feature Engineering X = resid_data.drop(["SalePrice"], axis=1) y = resid_data["SalePrice"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) X_train.shape, X_test.shape train = pd.concat([X_train, y_train], axis=1) test = pd.concat([X_test, y_test], axis=1) # Features with nan values in training set na_features = [ features for features in train.columns if train[features].isnull().any() == True ] na_features # Numerical Feature na_num = [feature for feature in na_features if train[feature].dtypes != "O"] print("Number of null numerical feature:", len(na_num)) train[na_num] train[na_num].isnull().sum() for feature in na_num: 
train[feature].fillna(train[feature].median(), inplace=True) train[na_num].isnull().sum() train[na_num] skew_num_features = ["LotFrontage", "LotArea", "1stFlrSF", "GrLivArea", "SalePrice"] for feature in skew_num_features: train[feature] = np.log(train[feature]) # Year/Date time Features train[dt_year_feature].isnull().sum() train[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head() for feature in ["YearBuilt", "YearRemodAdd", "GarageYrBlt"]: train[feature] = train["YrSold"] - train[feature] train[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head() # Categorical Feature na_cat_feature = [feature for feature in na_features if train[feature].dtypes == "O"] print("number of na categorical features:", len(na_cat_feature)) train[na_cat_feature] train[na_cat_feature].isnull().sum() / len(train) for feature in na_cat_feature: mode_value = train[feature].mode()[0] train[feature].fillna(mode_value, inplace=True) train[na_cat_feature].isnull().sum() # Repeating all the steps with feature engineering on Test data set to avoid data leakage cat_features = [feature for feature in train.columns if train[feature].dtypes == "O"] for feature in cat_features: lab_ord = train.groupby(feature)["SalePrice"].mean().sort_values().index lab_ord = {k: i for i, k in enumerate(lab_ord, 0)} train[feature] = train[feature].map(lab_ord) train train.isnull().sum() na_features = [ features for features in test.columns if test[features].isnull().any() == True ] na_features na_num = [feature for feature in na_features if test[feature].dtypes != "O"] print("Number of null numerical feature:", len(na_num)) test[na_num].isnull().sum() skew_num_features = ["LotFrontage", "LotArea", "1stFlrSF", "GrLivArea", "SalePrice"] for feature in skew_num_features: test[feature] = np.log(test[feature]) test for feature in ["YearBuilt", "YearRemodAdd", "GarageYrBlt"]: test[feature] = test["YrSold"] - test[feature] test[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head() cat_features = [feature for feature in test.columns if test[feature].dtypes == "O"] for feature in cat_features: lab_ord = test.groupby(feature)["SalePrice"].mean().sort_values().index lab_ord = {k: i for i, k in enumerate(lab_ord, 0)} test[feature] = test[feature].map(lab_ord) pd.set_option("display.max_columns", None) test test.isnull().sum() # Performing feature scaling on train and test data scal_feat = [feature for feature in train.columns if feature not in ["Id", "SalePrice"]] scal = StandardScaler() train_scal = scal.fit_transform(train[scal_feat]) test_scal = scal.transform(test[scal_feat]) X = pd.DataFrame(train_scal, columns=scal_feat) train = pd.concat([X, train["SalePrice"].reset_index(drop=True)], axis=1) train train.isnull().sum() X1 = pd.DataFrame(test_scal, columns=scal_feat) test = pd.concat([X1, test["SalePrice"].reset_index(drop=True)], axis=1) test.head() test.isnull().sum() # Performing Feature Selection X_train = train.drop(["SalePrice"], axis=1) y_train = train["SalePrice"] feat_sel_model = SelectFromModel(Lasso(alpha=0.05, random_state=0)) feat_sel_model.fit(X_train, y_train) feat_sel_model.get_support() sel_feat = X_train.columns[(feat_sel_model.get_support())] print("selected features:", len(sel_feat)) sel_feat # Fitting model to dataset from sklearn.ensemble import RandomForestRegressor rf_reg = RandomForestRegressor() rf_reg.fit(X_train, y_train) prediction = rf_reg.predict(X_test) print("MAE:", mean_absolute_error(y_test, prediction)) print("MSE:", mean_squared_error(y_test, prediction)) print("RMSE:", 
np.sqrt(mean_squared_error(y_test, prediction))) resid_test = pd.read_csv("test.csv") resid_test # executing feature engineering on resid test dataset for output na_features = [ features for features in resid_test.columns if resid_test[features].isnull().any() == True ] na_features na_num = [feature for feature in na_num if resid_test[feature].dtypes != "O"] print("Number of null numerical feature:", len(na_num)) resid_test[na_num].isnull().sum() for feature in na_num: resid_test[feature].fillna(resid_test[feature].median(), inplace=True) resid_test[na_num].isnull().sum() for feature in ["YearBuilt", "YearRemodAdd", "GarageYrBlt"]: resid_test[feature] = resid_test["YrSold"] - resid_test[feature] resid_test[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head() na_cat_features = [ feature for feature in na_features if resid_test[feature].dtypes == "O" ] print("number of null categorical features:", len(na_cat_features)) na_cat_features pct = resid_test[na_cat_features].isnull().sum() / len(resid_test) miss_feat = pct[pct > 0.7] miss_feat.index for feature in miss_feat.index: resid_test.drop([feature], inplace=True, axis=1) resid_test.head() na_feat = [ feature for feature in resid_test.columns if resid_test[feature].isnull().sum().any() == True ] na_feat na_cat_feat = [feature for feature in na_feat if resid_test[feature].dtypes == "O"] for feature in na_cat_feat: mode_value = resid_test[feature].mode()[0] resid_test[feature] = resid_test[feature].fillna(mode_value) resid_test.isnull().sum() resid_test.head() # performing feature scaling in house test data resid_test_scal = scal.transform(resid_test[scal_feat]) X_resid = pd.DataFrame(resid_test_scal, columns=scal_feat) X_resid X_house = X_house[selected_feat] X_house.head() price_prediction = rf_reg.predict(X_house) price_prediction np.exp(price_prediction) # Prediction Metrics sample = pd.read_csv("sample_submission.csv") y_test = sample["SalePrice"] print("MAE:", mean_absolute_error(np.log(y_test), price_prediction)) print("MSE:", mean_squared_error(np.log(y_test), price_prediction)) print("RMSE:", np.sqrt(mean_squared_error(np.log(y_test), price_prediction))) resid_test["SalePrice"] = np.exp(price_prediction) submission = resid_test[["Id", "SalePrice"]] submission.to_csv("./submission1.csv", index=False)
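# Note: the last cells above reference X_house and selected_feat, which are never
# defined, and resid_test's categorical columns are still strings when scal.transform
# is called, so the notebook cannot run to the end as written. A minimal corrected
# sketch of the final step is shown below; deriving the ordinal mapping from the raw
# training frame (resid_data), filling unseen categories with -1, and predicting on the
# full scaled feature set that rf_reg was actually fit on are all assumptions about the
# intended logic.
cat_cols = [c for c in scal_feat if resid_test[c].dtype == "O"]
for feature in cat_cols:
    order = resid_data.groupby(feature)["SalePrice"].mean().sort_values().index
    mapping = {k: i for i, k in enumerate(order)}
    resid_test[feature] = resid_test[feature].map(mapping).fillna(-1)

# guard against any numeric gaps the earlier median fill may have missed
resid_test[scal_feat] = resid_test[scal_feat].fillna(resid_test[scal_feat].median())

X_house = pd.DataFrame(scal.transform(resid_test[scal_feat]), columns=scal_feat)
price_prediction = rf_reg.predict(X_house)  # model was trained on log(SalePrice)

resid_test["SalePrice"] = np.exp(price_prediction)
resid_test[["Id", "SalePrice"]].to_csv("submission1.csv", index=False)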
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003256.ipynb
null
null
[{"Id": 69003256, "ScriptId": 18830697, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7878105, "CreationDate": "07/25/2021 17:50:01", "VersionNumber": 1.0, "Title": "Saumya_Ajay_Kumar", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 394.0, "LinesInsertedFromPrevious": 394.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
3,681
0
3,681
3,681
69003911
<jupyter_start><jupyter_text>60,000+ Chess Game Dataset (Chess.com) ### Context This is the dataset for all of the chess enthusiasts and chess.com members. It has been created via the chess.com API. ### Content Features_included:- * white_username - Username of the white player * black_username - Username of the black player * white_id - Link to other details of white player * black_id - Link to other details of black player * white_rating - White's ELO rating * black_rating - Black's ELO rating * white_result - Either win or the loss condition (like checkmate, draw, etc.) * black_result - Either win or the loss condition (like checkmate, draw, etc.) * time_class - blitz, bullet, rapid or daily * time_control - Total_time + Time_increment * rules - Either normal chess or other variants (like chess960) * rated - Whether ELO points are at stake * fen - Standard notation for describing a particular board position of a chess game. * pgn - standard plain text format for recording chess games You can extract much more features from the pgn columns. For that refer to the following [Notebook](https://www.kaggle.com/adityajha1504/those-features-won-t-engineer-themselves) Kaggle dataset identifier: chesscom-user-games-60000-games <jupyter_code>import pandas as pd df = pd.read_csv('chesscom-user-games-60000-games/club_games_data.csv') df.info() <jupyter_output><class 'pandas.core.frame.DataFrame'> RangeIndex: 66879 entries, 0 to 66878 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 white_username 66879 non-null object 1 black_username 66879 non-null object 2 white_id 66879 non-null object 3 black_id 66879 non-null object 4 white_rating 66879 non-null int64 5 black_rating 66879 non-null int64 6 white_result 66879 non-null object 7 black_result 66879 non-null object 8 time_class 66879 non-null object 9 time_control 66879 non-null object 10 rules 66879 non-null object 11 rated 66879 non-null bool 12 fen 66879 non-null object 13 pgn 66879 non-null object dtypes: bool(1), int64(2), object(11) memory usage: 6.7+ MB <jupyter_text>Examples: { "white_username": "-Amos-", "black_username": "miniman2804", "white_id": "https://api.chess.com/pub/player/-amos-", "black_id": "https://api.chess.com/pub/player/miniman2804", "white_rating": 1708, "black_rating": 1608, "white_result": "win", "black_result": "checkmated", "time_class": "daily", "time_control": "1/259200", "rules": "chess", "rated": true, "fen": "r2r4/p2p1p1p/b6R/n1p1kp2/2P2P2/3BP3/PP5P/4K2R b K f3 1 22", "pgn": "[Event \"Enjoyable games 2 - Round 1\"]\n[Site \"Chess.com\"]\n[Date \"2013.01.30\"]\n[Round \"-\"]\n[White \"-Amos-\"]\n[Black \"miniman2804\"]\n[Result \"1-0\"]\n[Tournament \"https://www.chess.com/tournament/enjoyable-games-2\"]\n[CurrentPosition \"r2r4/p2p1p1p/b6R/n1p1kp2/2P2P2/3BP3/...(truncated)", } { "white_username": "-Amos-", "black_username": "koltcho69", "white_id": "https://api.chess.com/pub/player/-amos-", "black_id": "https://api.chess.com/pub/player/koltcho69", "white_rating": 1726, "black_rating": 1577, "white_result": "win", "black_result": "resigned", "time_class": "daily", "time_control": "1/172800", "rules": "chess", "rated": true, "fen": "8/5Q1k/4n1pp/8/7P/2N2b2/PP3P2/5K2 b - - 1 33", "pgn": "[Event \"Rapid Rats - Board 5\"]\n[Site \"Chess.com\"]\n[Date \"2013.01.19\"]\n[Round \"-\"]\n[White \"-Amos-\"]\n[Black \"koltcho69\"]\n[Result \"1-0\"]\n[Match \"https://www.chess.com/club/matches/219602\"]\n[CurrentPosition \"8/5Q1k/4n1pp/8/7P/2N2b2/PP3P2/5K2 b - - 1 33\"]\n[Timezone 
...(truncated)", } { "white_username": "-Amos-", "black_username": "enhmandah", "white_id": "https://api.chess.com/pub/player/-amos-", "black_id": "https://api.chess.com/pub/player/enhmandah", "white_rating": 1727, "black_rating": 842, "white_result": "win", "black_result": "resigned", "time_class": "daily", "time_control": "1/172800", "rules": "chess", "rated": true, "fen": "rn1q1b1r/kb2p1pp/2p5/p1Q5/N1BP2n1/4PN2/1P3PPP/R1B1K2R b KQ - 5 15", "pgn": "[Event \"CHESS BOARD CLASH - Round 1\"]\n[Site \"Chess.com\"]\n[Date \"2013.02.01\"]\n[Round \"-\"]\n[White \"-Amos-\"]\n[Black \"enhmandah\"]\n[Result \"1-0\"]\n[Tournament \"https://www.chess.com/tournament/just-another-clash\"]\n[CurrentPosition \"rn1q1b1r/kb2p1pp/2p5/p1Q5/N1BP2n1/4PN...(truncated)", } { "white_username": "enhmandah", "black_username": "-Amos-", "white_id": "https://api.chess.com/pub/player/enhmandah", "black_id": "https://api.chess.com/pub/player/-amos-", "white_rating": 819, "black_rating": 1727, "white_result": "checkmated", "black_result": "win", "time_class": "daily", "time_control": "1/172800", "rules": "chess", "rated": true, "fen": "r3kb1r/pp3ppp/3p1n2/2pKp3/P3P3/1P6/4qP1P/QNB5 w kq - 3 17", "pgn": "[Event \"CHESS BOARD CLASH - Round 1\"]\n[Site \"Chess.com\"]\n[Date \"2013.02.01\"]\n[Round \"-\"]\n[White \"enhmandah\"]\n[Black \"-Amos-\"]\n[Result \"0-1\"]\n[Tournament \"https://www.chess.com/tournament/just-another-clash\"]\n[CurrentPosition \"r3kb1r/pp3ppp/3p1n2/2pKp3/P3P3/1P6/4q...(truncated)", } <jupyter_script>from IPython.core.display import HTML styles = """@import url('https://fonts.googleapis.com/css?family=Quicksand&display=swap'); * { margin: 0; padding: 0; box-sizing: border-box; } h3 { font-family: Comic Sans MS; } .alert { width: 80%; margin: 20px auto; padding: 30px; position: relative; border-radius: 5px; box-shadow: 0 0 15px 5px #ccc; } .close { position: absolute; width: 30px; height: 30px; opacity: 0.5; border-width: 1px; border-style: solid; border-radius: 50%; right: 15px; top: 25px; text-align: center; font-size: 1.6em; cursor: pointer; } .simple-alert { background-color: #aed6e5; border-left: 5px solid #245b70; } .simple-alert .close { border-color: #245b70; color: #245b70; } .success-alert { background-color: #aee5c0; border-left: 5px solid #24703d; } .success-alert .close { border-color: #24703d; color: #24703d; } .danger-alert { background-color: #e5aeae; border-left: 5px solid #702424; } .danger-alert .close { border-color: #702424; color: #702424; } .warning-alert { background-color: #ffe6a9; border-left: 5px solid #a97800; } .warning-alert .close { border-color: #a97800; color: #a97800; } """ HTML("<style>" + styles + "</style>") # ![](https://kgcorner.com/wp-content/uploads/2021/05/fq4cqqdmz4jv9agitg72.jpeg) # Extracting Features from Portable Game Notation (PGN) # The objective of this notebook is to extract features from the PGN features. # What is a PGN # Portable Game Notation (PGN) is a standard plain text format for recording chess games (both the moves and related data), which can be read by humans and is also supported by most chess software. import pandas as pd df = pd.read_csv("../input/chesscom-user-games-60000-games/club_games_data.csv") df.head(2) # # # 📌 What we are doing in this notebook: We have 14 features, the last one is the **PGN**. # We can extract many more features from it. # These features may be essential in improving your model accuracy. 
# # # Example PGN
# df.pgn[0].split("\n")
# The additional features we can extract from the pgn are :-
# 'Event', 'Site', 'Start_Date', 'End_Date', 'Start_Time', 'End_Time', 'Round', 'Result', 'Tournament', 'ECO', 'First_Move', 'Second_Move', 'Third_Move', 'Fourth_Move'.
feature_names = [
    "Event",
    "Site",
    "Start_Date",
    "End_Date",
    "Start_Time",
    "End_Time",
    "Eco",
    "EcoName",
    "Round",
    "Result",
    "Game_Type",
]
feature_positions = [0, 1, 2, -6, -7, -5, -15, -14, 3, 6, 7]
# For each feature name and its position in pgn.split('\n'), create the
# corresponding column in the dataframe
for feature_name, position in zip(feature_names, feature_positions):
    df[feature_name] = df["pgn"].apply(lambda x: x.split("\n")[position].split('"')[1])


# The ECO code is a classification system for chess opening moves.
# There are five main categories, "A" to "E", corresponding to the five volumes of the earlier editions, each of which is further subdivided into 100 subcategories, for a total of 500 codes. The term "ECO" is often used as a shorthand for this coding system.
# We can also extract Eco_Name from the EcoName feature we extracted from the pgn
df.iloc[0]["EcoName"]
df["Eco_Name"] = df.EcoName.apply(lambda x: x.split("/")[-1])
# If you compare the pgn of the 1st row with that of the 2nd row, you will notice a difference:
# there are two types of games here, those played in a tournament and those played as regular matches.
# **Using this information we can create another feature** :- Is_Tournament
print("Tournament : \n {}\n".format(df.pgn[0].split("\n")[7]))
print("Regular Match : \n {}".format(df.pgn[1].split("\n")[7]))
df["Is_tournament"] = df["Game_Type"].apply(lambda x: "tournament" in x)
# Creating the new, enriched csv
df.to_csv("../working/df_clean.csv")
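# A hedged aside (editor's sketch, not part of the original notebook): indexing
# pgn.split("\n") by fixed positions works for the games shown above, but it can
# break if the number of header tags ever differs between games. Assuming every
# pgn follows the standard [Tag "Value"] header layout seen in the examples, a
# small regex-based parser is a more defensive alternative; the helper names
# parse_pgn_tags, movetext_of and first_moves are illustrative, not from the
# original notebook.
import re


def parse_pgn_tags(pgn):
    # Collect every [Tag "Value"] header pair into a dict, e.g. {"ECO": "E22", ...}
    return dict(re.findall(r'\[(\w+) "([^"]*)"\]', pgn))


def movetext_of(pgn):
    # The movetext is whatever follows the blank line that closes the header block
    return pgn.split("\n\n", 1)[-1].strip()


def first_moves(pgn, n=4):
    # Drop move numbers such as "1." and keep the first n half-moves,
    # which covers the 'First_Move' ... 'Fourth_Move' idea mentioned above
    tokens = [t for t in movetext_of(pgn).split() if not t.endswith(".")]
    return tokens[:n]


# Illustrative usage (output depends on the row):
# parse_pgn_tags(df.pgn[0]).get("ECO")  e.g. 'E22'
# first_moves(df.pgn[0])                e.g. ['d4', 'Nf6', 'c4', 'e6']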
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003911.ipynb
chesscom-user-games-60000-games
adityajha1504
[{"Id": 69003911, "ScriptId": 18806818, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5319702, "CreationDate": "07/25/2021 18:02:13", "VersionNumber": 2.0, "Title": "Extracting features from PGN", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 123.0, "LinesInsertedFromPrevious": 74.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 49.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
[{"Id": 91690053, "KernelVersionId": 69003911, "SourceDatasetVersionId": 2458363}]
[{"Id": 2458363, "DatasetId": 1488027, "DatasourceVersionId": 2500773, "CreatorUserId": 5319702, "LicenseName": "CC0: Public Domain", "CreationDate": "07/24/2021 14:38:57", "VersionNumber": 1.0, "Title": "60,000+ Chess Game Dataset (Chess.com)", "Slug": "chesscom-user-games-60000-games", "Subtitle": "60,000+ games played on Chess.com, including 20 + features", "Description": "### Context\n\nThis is the dataset for all of the chess enthusiasts and chess.com members. It has been created via the chess.com API.\n\n\n### Content\n\nFeatures_included:-\n\n* white_username - Username of the white player\n* black_username - Username of the black player\n* white_id - Link to other details of white player\n* black_id - Link to other details of black player\n* white_rating - White's ELO rating\n* black_rating - Black's ELO rating\n* white_result - Either win or the loss condition (like checkmate, draw, etc.)\n* black_result - Either win or the loss condition (like checkmate, draw, etc.)\n* time_class - blitz, bullet, rapid or daily\n* time_control - Total_time + Time_increment\n* rules - Either normal chess or other variants (like chess960)\n* rated - Whether ELO points are at stake\n* fen - Standard notation for describing a particular board position of a chess game.\n* pgn - standard plain text format for recording chess games\n\nYou can extract much more features from the pgn columns. For that refer to the following [Notebook](https://www.kaggle.com/adityajha1504/those-features-won-t-engineer-themselves)\n\n\n### Acknowledgements\n\nThanks to chess.com for creating such an amazing API.\n\n\n### Inspiration\n\nThere are a lot of interesting tasks that can be done with this dataset like,\n* Creating an LSTM that takes in moves and predicts results.\n* A model that takes in fen and predicts the result.\n* Analytics regarding the current ongoings and trends in chess, and how one can win more through the use of analytics.\n* A model that takes in the moves and predicts the Elo\netc.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
[{"Id": 1488027, "CreatorUserId": 5319702, "OwnerUserId": 5319702.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2458363.0, "CurrentDatasourceVersionId": 2500773.0, "ForumId": 1507718, "Type": 2, "CreationDate": "07/24/2021 14:38:57", "LastActivityDate": "07/24/2021", "TotalViews": 7204, "TotalDownloads": 571, "TotalVotes": 16, "TotalKernels": 2}]
[{"Id": 5319702, "UserName": "adityajha1504", "DisplayName": "AdityaJha1504", "RegisterDate": "06/17/2020", "PerformanceTier": 2}]
[{"chesscom-user-games-60000-games/club_games_data.csv": {"column_names": "[\"white_username\", \"black_username\", \"white_id\", \"black_id\", \"white_rating\", \"black_rating\", \"white_result\", \"black_result\", \"time_class\", \"time_control\", \"rules\", \"rated\", \"fen\", \"pgn\"]", "column_data_types": "{\"white_username\": \"object\", \"black_username\": \"object\", \"white_id\": \"object\", \"black_id\": \"object\", \"white_rating\": \"int64\", \"black_rating\": \"int64\", \"white_result\": \"object\", \"black_result\": \"object\", \"time_class\": \"object\", \"time_control\": \"object\", \"rules\": \"object\", \"rated\": \"bool\", \"fen\": \"object\", \"pgn\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 66879 entries, 0 to 66878\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 white_username 66879 non-null object\n 1 black_username 66879 non-null object\n 2 white_id 66879 non-null object\n 3 black_id 66879 non-null object\n 4 white_rating 66879 non-null int64 \n 5 black_rating 66879 non-null int64 \n 6 white_result 66879 non-null object\n 7 black_result 66879 non-null object\n 8 time_class 66879 non-null object\n 9 time_control 66879 non-null object\n 10 rules 66879 non-null object\n 11 rated 66879 non-null bool \n 12 fen 66879 non-null object\n 13 pgn 66879 non-null object\ndtypes: bool(1), int64(2), object(11)\nmemory usage: 6.7+ MB\n", "summary": "{\"white_rating\": {\"count\": 66879.0, \"mean\": 1247.5857294516964, \"std\": 403.89596684709636, \"min\": 100.0, \"25%\": 976.0, \"50%\": 1252.0, \"75%\": 1524.0, \"max\": 3172.0}, \"black_rating\": {\"count\": 66879.0, \"mean\": 1246.9827300049342, \"std\": 403.55207244356626, \"min\": 100.0, \"25%\": 975.0, \"50%\": 1251.0, \"75%\": 1524.0, \"max\": 3172.0}}", "examples": "{\"white_username\":{\"0\":\"-Amos-\",\"1\":\"-Amos-\",\"2\":\"-Amos-\",\"3\":\"enhmandah\"},\"black_username\":{\"0\":\"miniman2804\",\"1\":\"koltcho69\",\"2\":\"enhmandah\",\"3\":\"-Amos-\"},\"white_id\":{\"0\":\"https:\\/\\/api.chess.com\\/pub\\/player\\/-amos-\",\"1\":\"https:\\/\\/api.chess.com\\/pub\\/player\\/-amos-\",\"2\":\"https:\\/\\/api.chess.com\\/pub\\/player\\/-amos-\",\"3\":\"https:\\/\\/api.chess.com\\/pub\\/player\\/enhmandah\"},\"black_id\":{\"0\":\"https:\\/\\/api.chess.com\\/pub\\/player\\/miniman2804\",\"1\":\"https:\\/\\/api.chess.com\\/pub\\/player\\/koltcho69\",\"2\":\"https:\\/\\/api.chess.com\\/pub\\/player\\/enhmandah\",\"3\":\"https:\\/\\/api.chess.com\\/pub\\/player\\/-amos-\"},\"white_rating\":{\"0\":1708,\"1\":1726,\"2\":1727,\"3\":819},\"black_rating\":{\"0\":1608,\"1\":1577,\"2\":842,\"3\":1727},\"white_result\":{\"0\":\"win\",\"1\":\"win\",\"2\":\"win\",\"3\":\"checkmated\"},\"black_result\":{\"0\":\"checkmated\",\"1\":\"resigned\",\"2\":\"resigned\",\"3\":\"win\"},\"time_class\":{\"0\":\"daily\",\"1\":\"daily\",\"2\":\"daily\",\"3\":\"daily\"},\"time_control\":{\"0\":\"1\\/259200\",\"1\":\"1\\/172800\",\"2\":\"1\\/172800\",\"3\":\"1\\/172800\"},\"rules\":{\"0\":\"chess\",\"1\":\"chess\",\"2\":\"chess\",\"3\":\"chess\"},\"rated\":{\"0\":true,\"1\":true,\"2\":true,\"3\":true},\"fen\":{\"0\":\"r2r4\\/p2p1p1p\\/b6R\\/n1p1kp2\\/2P2P2\\/3BP3\\/PP5P\\/4K2R b K f3 1 22\",\"1\":\"8\\/5Q1k\\/4n1pp\\/8\\/7P\\/2N2b2\\/PP3P2\\/5K2 b - - 1 33\",\"2\":\"rn1q1b1r\\/kb2p1pp\\/2p5\\/p1Q5\\/N1BP2n1\\/4PN2\\/1P3PPP\\/R1B1K2R b KQ - 5 15\",\"3\":\"r3kb1r\\/pp3ppp\\/3p1n2\\/2pKp3\\/P3P3\\/1P6\\/4qP1P\\/QNB5 w kq - 3 17\"},\"pgn\":{\"0\":\"[Event \\\"Enjoyable 
games 2 - Round 1\\\"]\\n[Site \\\"Chess.com\\\"]\\n[Date \\\"2013.01.30\\\"]\\n[Round \\\"-\\\"]\\n[White \\\"-Amos-\\\"]\\n[Black \\\"miniman2804\\\"]\\n[Result \\\"1-0\\\"]\\n[Tournament \\\"https:\\/\\/www.chess.com\\/tournament\\/enjoyable-games-2\\\"]\\n[CurrentPosition \\\"r2r4\\/p2p1p1p\\/b6R\\/n1p1kp2\\/2P2P2\\/3BP3\\/PP5P\\/4K2R b K f3 1 22\\\"]\\n[Timezone \\\"UTC\\\"]\\n[ECO \\\"E22\\\"]\\n[ECOUrl \\\"https:\\/\\/www.chess.com\\/openings\\/Nimzo-Indian-Defense-Spielmann-Variation\\\"]\\n[UTCDate \\\"2013.01.30\\\"]\\n[UTCTime \\\"16:35:14\\\"]\\n[WhiteElo \\\"1708\\\"]\\n[BlackElo \\\"1608\\\"]\\n[TimeControl \\\"1\\/259200\\\"]\\n[Termination \\\"-Amos- won by checkmate\\\"]\\n[StartTime \\\"16:35:14\\\"]\\n[EndDate \\\"2013.02.01\\\"]\\n[EndTime \\\"18:14:48\\\"]\\n[Link \\\"https:\\/\\/www.chess.com\\/game\\/daily\\/64629816\\\"]\\n\\n1. d4 Nf6 2. c4 e6 3. Nc3 Bb4 4. Qb3 Bxc3+ 5. Qxc3 O-O 6. Bg5 c5 7. dxc5 Nc6 8. Nf3 Qa5 9. Bxf6 gxf6 10. Qxa5 Nxa5 11. e3 Rd8 12. Rd1 Kg7 13. Be2 b6 14. Rd4 bxc5 15. Rg4+ Kh6 16. Bd3 f5 17. Rh4+ Kg6 18. g4 Ba6 19. gxf5+ exf5 20. Ne5+ Kf6 21. Rh6+ Kxe5 22. f4# 1-0\\n\",\"1\":\"[Event \\\"Rapid Rats - Board 5\\\"]\\n[Site \\\"Chess.com\\\"]\\n[Date \\\"2013.01.19\\\"]\\n[Round \\\"-\\\"]\\n[White \\\"-Amos-\\\"]\\n[Black \\\"koltcho69\\\"]\\n[Result \\\"1-0\\\"]\\n[Match \\\"https:\\/\\/www.chess.com\\/club\\/matches\\/219602\\\"]\\n[CurrentPosition \\\"8\\/5Q1k\\/4n1pp\\/8\\/7P\\/2N2b2\\/PP3P2\\/5K2 b - - 1 33\\\"]\\n[Timezone \\\"UTC\\\"]\\n[ECO \\\"C53\\\"]\\n[ECOUrl \\\"https:\\/\\/www.chess.com\\/openings\\/Giuoco-Piano-Game-Main-Line\\\"]\\n[UTCDate \\\"2013.01.19\\\"]\\n[UTCTime \\\"14:29:25\\\"]\\n[WhiteElo \\\"1726\\\"]\\n[BlackElo \\\"1577\\\"]\\n[TimeControl \\\"1\\/172800\\\"]\\n[Termination \\\"-Amos- won by resignation\\\"]\\n[StartTime \\\"14:29:25\\\"]\\n[EndDate \\\"2013.02.01\\\"]\\n[EndTime \\\"18:22:03\\\"]\\n[Link \\\"https:\\/\\/www.chess.com\\/game\\/daily\\/64070770\\\"]\\n\\n1. e4 e5 2. Nf3 Nc6 3. Bc4 Bc5 4. c3 a6 5. d4 exd4 6. cxd4 Be7 7. Qb3 Na5 8. Qc2 Nxc4 9. Qxc4 d6 10. Nc3 c6 11. O-O h6 12. Re1 Nf6 13. d5 c5 14. e5 dxe5 15. Nxe5 O-O 16. Ng6 Re8 17. Rxe7 Rxe7 18. Nxe7+ Qxe7 19. Bf4 b5 20. d6 Qd7 21. Qxc5 Bb7 22. Qc7 Qxc7 23. dxc7 Nd5 24. Rd1 Nxf4 25. Rd8+ Kh7 26. Rxa8 Bxa8 27. c8=Q Bxg2 28. Qxa6 Bf3 29. Qxb5 Nh3+ 30. Kf1 g6 31. Qd7 Ng5 32. h4 Ne6 33. Qxf7+ 1-0\\n\",\"2\":\"[Event \\\"CHESS BOARD CLASH - Round 1\\\"]\\n[Site \\\"Chess.com\\\"]\\n[Date \\\"2013.02.01\\\"]\\n[Round \\\"-\\\"]\\n[White \\\"-Amos-\\\"]\\n[Black \\\"enhmandah\\\"]\\n[Result \\\"1-0\\\"]\\n[Tournament \\\"https:\\/\\/www.chess.com\\/tournament\\/just-another-clash\\\"]\\n[CurrentPosition \\\"rn1q1b1r\\/kb2p1pp\\/2p5\\/p1Q5\\/N1BP2n1\\/4PN2\\/1P3PPP\\/R1B1K2R b KQ - 5 15\\\"]\\n[Timezone \\\"UTC\\\"]\\n[ECO \\\"D00\\\"]\\n[ECOUrl \\\"https:\\/\\/www.chess.com\\/openings\\/Queens-Pawn-Opening-1...d5-2.e3\\\"]\\n[UTCDate \\\"2013.02.01\\\"]\\n[UTCTime \\\"11:24:19\\\"]\\n[WhiteElo \\\"1727\\\"]\\n[BlackElo \\\"842\\\"]\\n[TimeControl \\\"1\\/172800\\\"]\\n[Termination \\\"-Amos- won by resignation\\\"]\\n[StartTime \\\"11:24:19\\\"]\\n[EndDate \\\"2013.02.02\\\"]\\n[EndTime \\\"17:58:11\\\"]\\n[Link \\\"https:\\/\\/www.chess.com\\/game\\/daily\\/64714474\\\"]\\n\\n1. d4 d5 2. e3 c6 3. c4 dxc4 4. Bxc4 b5 5. Bb3 a5 6. Qf3 Bb7 7. Bxf7+ Kd7 8. Qf5+ Kc7 9. Nf3 Nh6 10. Qe5+ Kb6 11. a4 bxa4 12. Nc3 Ng4 13. Nxa4+ Ka6 14. Bc4+ Ka7 15. 
Qc5+ 1-0\\n\",\"3\":\"[Event \\\"CHESS BOARD CLASH - Round 1\\\"]\\n[Site \\\"Chess.com\\\"]\\n[Date \\\"2013.02.01\\\"]\\n[Round \\\"-\\\"]\\n[White \\\"enhmandah\\\"]\\n[Black \\\"-Amos-\\\"]\\n[Result \\\"0-1\\\"]\\n[Tournament \\\"https:\\/\\/www.chess.com\\/tournament\\/just-another-clash\\\"]\\n[CurrentPosition \\\"r3kb1r\\/pp3ppp\\/3p1n2\\/2pKp3\\/P3P3\\/1P6\\/4qP1P\\/QNB5 w kq - 3 17\\\"]\\n[Timezone \\\"UTC\\\"]\\n[ECO \\\"B20\\\"]\\n[ECOUrl \\\"https:\\/\\/www.chess.com\\/openings\\/Sicilian-Defense-Snyder-Variation\\\"]\\n[UTCDate \\\"2013.02.01\\\"]\\n[UTCTime \\\"11:24:17\\\"]\\n[WhiteElo \\\"819\\\"]\\n[BlackElo \\\"1727\\\"]\\n[TimeControl \\\"1\\/172800\\\"]\\n[Termination \\\"-Amos- won by checkmate\\\"]\\n[StartTime \\\"11:24:17\\\"]\\n[EndDate \\\"2013.02.02\\\"]\\n[EndTime \\\"18:29:41\\\"]\\n[Link \\\"https:\\/\\/www.chess.com\\/game\\/daily\\/64714344\\\"]\\n\\n1. e4 c5 2. b3 Nc6 3. a4 d6 4. Bb5 Bd7 5. Qf3 Nd4 6. Qf4 e5 7. Qg3 Nxc2+ 8. Kd1 Nxa1 9. Qc3 Bxb5 10. Qxa1 Qg5 11. d3 Qxg2 12. Ne2 Qxh1+ 13. Kd2 Bxd3 14. Kxd3 Qd1+ 15. Kc4 Qxe2+ 16. Kd5 Nf6# 0-1\\n\"}}"}}]
true
1
<start_data_description><data_path>chesscom-user-games-60000-games/club_games_data.csv: <column_names> ['white_username', 'black_username', 'white_id', 'black_id', 'white_rating', 'black_rating', 'white_result', 'black_result', 'time_class', 'time_control', 'rules', 'rated', 'fen', 'pgn'] <column_types> {'white_username': 'object', 'black_username': 'object', 'white_id': 'object', 'black_id': 'object', 'white_rating': 'int64', 'black_rating': 'int64', 'white_result': 'object', 'black_result': 'object', 'time_class': 'object', 'time_control': 'object', 'rules': 'object', 'rated': 'bool', 'fen': 'object', 'pgn': 'object'} <dataframe_Summary> {'white_rating': {'count': 66879.0, 'mean': 1247.5857294516964, 'std': 403.89596684709636, 'min': 100.0, '25%': 976.0, '50%': 1252.0, '75%': 1524.0, 'max': 3172.0}, 'black_rating': {'count': 66879.0, 'mean': 1246.9827300049342, 'std': 403.55207244356626, 'min': 100.0, '25%': 975.0, '50%': 1251.0, '75%': 1524.0, 'max': 3172.0}} <dataframe_info> RangeIndex: 66879 entries, 0 to 66878 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 white_username 66879 non-null object 1 black_username 66879 non-null object 2 white_id 66879 non-null object 3 black_id 66879 non-null object 4 white_rating 66879 non-null int64 5 black_rating 66879 non-null int64 6 white_result 66879 non-null object 7 black_result 66879 non-null object 8 time_class 66879 non-null object 9 time_control 66879 non-null object 10 rules 66879 non-null object 11 rated 66879 non-null bool 12 fen 66879 non-null object 13 pgn 66879 non-null object dtypes: bool(1), int64(2), object(11) memory usage: 6.7+ MB <some_examples> {'white_username': {'0': '-Amos-', '1': '-Amos-', '2': '-Amos-', '3': 'enhmandah'}, 'black_username': {'0': 'miniman2804', '1': 'koltcho69', '2': 'enhmandah', '3': '-Amos-'}, 'white_id': {'0': 'https://api.chess.com/pub/player/-amos-', '1': 'https://api.chess.com/pub/player/-amos-', '2': 'https://api.chess.com/pub/player/-amos-', '3': 'https://api.chess.com/pub/player/enhmandah'}, 'black_id': {'0': 'https://api.chess.com/pub/player/miniman2804', '1': 'https://api.chess.com/pub/player/koltcho69', '2': 'https://api.chess.com/pub/player/enhmandah', '3': 'https://api.chess.com/pub/player/-amos-'}, 'white_rating': {'0': 1708, '1': 1726, '2': 1727, '3': 819}, 'black_rating': {'0': 1608, '1': 1577, '2': 842, '3': 1727}, 'white_result': {'0': 'win', '1': 'win', '2': 'win', '3': 'checkmated'}, 'black_result': {'0': 'checkmated', '1': 'resigned', '2': 'resigned', '3': 'win'}, 'time_class': {'0': 'daily', '1': 'daily', '2': 'daily', '3': 'daily'}, 'time_control': {'0': '1/259200', '1': '1/172800', '2': '1/172800', '3': '1/172800'}, 'rules': {'0': 'chess', '1': 'chess', '2': 'chess', '3': 'chess'}, 'rated': {'0': True, '1': True, '2': True, '3': True}, 'fen': {'0': 'r2r4/p2p1p1p/b6R/n1p1kp2/2P2P2/3BP3/PP5P/4K2R b K f3 1 22', '1': '8/5Q1k/4n1pp/8/7P/2N2b2/PP3P2/5K2 b - - 1 33', '2': 'rn1q1b1r/kb2p1pp/2p5/p1Q5/N1BP2n1/4PN2/1P3PPP/R1B1K2R b KQ - 5 15', '3': 'r3kb1r/pp3ppp/3p1n2/2pKp3/P3P3/1P6/4qP1P/QNB5 w kq - 3 17'}, 'pgn': {'0': '[Event "Enjoyable games 2 - Round 1"]\n[Site "Chess.com"]\n[Date "2013.01.30"]\n[Round "-"]\n[White "-Amos-"]\n[Black "miniman2804"]\n[Result "1-0"]\n[Tournament "https://www.chess.com/tournament/enjoyable-games-2"]\n[CurrentPosition "r2r4/p2p1p1p/b6R/n1p1kp2/2P2P2/3BP3/PP5P/4K2R b K f3 1 22"]\n[Timezone "UTC"]\n[ECO "E22"]\n[ECOUrl "https://www.chess.com/openings/Nimzo-Indian-Defense-Spielmann-Variation"]\n[UTCDate 
"2013.01.30"]\n[UTCTime "16:35:14"]\n[WhiteElo "1708"]\n[BlackElo "1608"]\n[TimeControl "1/259200"]\n[Termination "-Amos- won by checkmate"]\n[StartTime "16:35:14"]\n[EndDate "2013.02.01"]\n[EndTime "18:14:48"]\n[Link "https://www.chess.com/game/daily/64629816"]\n\n1. d4 Nf6 2. c4 e6 3. Nc3 Bb4 4. Qb3 Bxc3+ 5. Qxc3 O-O 6. Bg5 c5 7. dxc5 Nc6 8. Nf3 Qa5 9. Bxf6 gxf6 10. Qxa5 Nxa5 11. e3 Rd8 12. Rd1 Kg7 13. Be2 b6 14. Rd4 bxc5 15. Rg4+ Kh6 16. Bd3 f5 17. Rh4+ Kg6 18. g4 Ba6 19. gxf5+ exf5 20. Ne5+ Kf6 21. Rh6+ Kxe5 22. f4# 1-0\n', '1': '[Event "Rapid Rats - Board 5"]\n[Site "Chess.com"]\n[Date "2013.01.19"]\n[Round "-"]\n[White "-Amos-"]\n[Black "koltcho69"]\n[Result "1-0"]\n[Match "https://www.chess.com/club/matches/219602"]\n[CurrentPosition "8/5Q1k/4n1pp/8/7P/2N2b2/PP3P2/5K2 b - - 1 33"]\n[Timezone "UTC"]\n[ECO "C53"]\n[ECOUrl "https://www.chess.com/openings/Giuoco-Piano-Game-Main-Line"]\n[UTCDate "2013.01.19"]\n[UTCTime "14:29:25"]\n[WhiteElo "1726"]\n[BlackElo "1577"]\n[TimeControl "1/172800"]\n[Termination "-Amos- won by resignation"]\n[StartTime "14:29:25"]\n[EndDate "2013.02.01"]\n[EndTime "18:22:03"]\n[Link "https://www.chess.com/game/daily/64070770"]\n\n1. e4 e5 2. Nf3 Nc6 3. Bc4 Bc5 4. c3 a6 5. d4 exd4 6. cxd4 Be7 7. Qb3 Na5 8. Qc2 Nxc4 9. Qxc4 d6 10. Nc3 c6 11. O-O h6 12. Re1 Nf6 13. d5 c5 14. e5 dxe5 15. Nxe5 O-O 16. Ng6 Re8 17. Rxe7 Rxe7 18. Nxe7+ Qxe7 19. Bf4 b5 20. d6 Qd7 21. Qxc5 Bb7 22. Qc7 Qxc7 23. dxc7 Nd5 24. Rd1 Nxf4 25. Rd8+ Kh7 26. Rxa8 Bxa8 27. c8=Q Bxg2 28. Qxa6 Bf3 29. Qxb5 Nh3+ 30. Kf1 g6 31. Qd7 Ng5 32. h4 Ne6 33. Qxf7+ 1-0\n', '2': '[Event "CHESS BOARD CLASH - Round 1"]\n[Site "Chess.com"]\n[Date "2013.02.01"]\n[Round "-"]\n[White "-Amos-"]\n[Black "enhmandah"]\n[Result "1-0"]\n[Tournament "https://www.chess.com/tournament/just-another-clash"]\n[CurrentPosition "rn1q1b1r/kb2p1pp/2p5/p1Q5/N1BP2n1/4PN2/1P3PPP/R1B1K2R b KQ - 5 15"]\n[Timezone "UTC"]\n[ECO "D00"]\n[ECOUrl "https://www.chess.com/openings/Queens-Pawn-Opening-1...d5-2.e3"]\n[UTCDate "2013.02.01"]\n[UTCTime "11:24:19"]\n[WhiteElo "1727"]\n[BlackElo "842"]\n[TimeControl "1/172800"]\n[Termination "-Amos- won by resignation"]\n[StartTime "11:24:19"]\n[EndDate "2013.02.02"]\n[EndTime "17:58:11"]\n[Link "https://www.chess.com/game/daily/64714474"]\n\n1. d4 d5 2. e3 c6 3. c4 dxc4 4. Bxc4 b5 5. Bb3 a5 6. Qf3 Bb7 7. Bxf7+ Kd7 8. Qf5+ Kc7 9. Nf3 Nh6 10. Qe5+ Kb6 11. a4 bxa4 12. Nc3 Ng4 13. Nxa4+ Ka6 14. Bc4+ Ka7 15. Qc5+ 1-0\n', '3': '[Event "CHESS BOARD CLASH - Round 1"]\n[Site "Chess.com"]\n[Date "2013.02.01"]\n[Round "-"]\n[White "enhmandah"]\n[Black "-Amos-"]\n[Result "0-1"]\n[Tournament "https://www.chess.com/tournament/just-another-clash"]\n[CurrentPosition "r3kb1r/pp3ppp/3p1n2/2pKp3/P3P3/1P6/4qP1P/QNB5 w kq - 3 17"]\n[Timezone "UTC"]\n[ECO "B20"]\n[ECOUrl "https://www.chess.com/openings/Sicilian-Defense-Snyder-Variation"]\n[UTCDate "2013.02.01"]\n[UTCTime "11:24:17"]\n[WhiteElo "819"]\n[BlackElo "1727"]\n[TimeControl "1/172800"]\n[Termination "-Amos- won by checkmate"]\n[StartTime "11:24:17"]\n[EndDate "2013.02.02"]\n[EndTime "18:29:41"]\n[Link "https://www.chess.com/game/daily/64714344"]\n\n1. e4 c5 2. b3 Nc6 3. a4 d6 4. Bb5 Bd7 5. Qf3 Nd4 6. Qf4 e5 7. Qg3 Nxc2+ 8. Kd1 Nxa1 9. Qc3 Bxb5 10. Qxa1 Qg5 11. d3 Qxg2 12. Ne2 Qxh1+ 13. Kd2 Bxd3 14. Kxd3 Qd1+ 15. Kc4 Qxe2+ 16. Kd5 Nf6# 0-1\n'}} <end_description>
1,373
0
3,545
1,373
69003792
# ## **Titanic Competition** # this is my first published notebook about the kaggle competition, I have synthesized the results from various notebooks and experimented on my own on the data and reached **0.796 score**, I hope this notebook is useful for someone # * **Your feedback is welcome** # * **Commented code is things i have tried that hasn't worked** # importing some useful libraries import pandas as pd import numpy as np from sklearn.ensemble import RandomForestClassifier from xgboost.sklearn import XGBClassifier import matplotlib.pyplot as plt import seaborn as sns sns.set() # reading the data train = pd.read_csv("../input/titanic/train.csv") test = pd.read_csv("../input/titanic/test.csv") train.head() train.shape test.shape # # Exploring missing values train.isnull().sum() # to_show_null_data(Age and cabin embarked) test.isnull().sum() # to_show_null_data(AGE and cabin) # # Plotting some useful visualizations about the features def bar_chart(feature): survived = train[train["Survived"] == 1][feature].value_counts() dead = train[train["Survived"] == 0][feature].value_counts() df = pd.DataFrame([survived, dead]) df.index = ["Survived", "Dead"] df.plot(kind="bar", stacked=True, figsize=(10, 5)) bar_chart("Sex") bar_chart("Pclass") bar_chart("Embarked") bar_chart("Parch") bar_chart("SibSp") all_data = [train, test] for data in all_data: data["Status"] = data["Name"].str.extract(" ([A-Za-z]+)\.", expand=False) train.Status.unique() for dataset in all_data: dataset["Status"] = dataset["Status"].replace( [ "Lady", "Countess", "Capt", "Col", "Don", "Dr", "Major", "Rev", "Sir", "Jonkheer", "Dona", ], "Rare", ) dataset["Status"] = dataset["Status"].replace("Mlle", "Miss") dataset["Status"] = dataset["Status"].replace("Ms", "Miss") dataset["Status"] = dataset["Status"].replace("Mme", "Mrs") train.isnull().sum() test.head(10) bar_chart("Status") status_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} for dataset in all_data: dataset["Status"] = dataset["Status"].map(status_mapping) dataset["Status"] = dataset["Status"].fillna(0) train.Status.unique() bar_chart("Status") # # Feature engineering train["FamilySize"] = train["SibSp"] + train["Parch"] + 1 test["FamilySize"] = test["SibSp"] + test["Parch"] + 1 train.head() sex_mapping = {"male": 0, "female": 1} for dataset in all_data: dataset["Sex"] = dataset["Sex"].map(sex_mapping) for dataset in all_data: dataset["IsAlone"] = 0 dataset.loc[dataset["FamilySize"] == 1, "IsAlone"] = 1 train["Cabin_category"] = train["Cabin"].astype(str).str[0] train["Cabin_category"] = train["Cabin_category"].map( {"A": 1, "B": 2, "C": 2, "D": 3, "E": 4, "F": 5, "G": 6, "T": 7} ) train["Cabin_category"] = train["Cabin_category"].fillna(0) # Cabin Grouping train["HasCabin"] = train["Cabin"].apply(lambda x: 0 if x is np.nan else 1) test["Cabin_category"] = test["Cabin"].astype(str).str[0] test["Cabin_category"] = test["Cabin_category"].map( {"A": 1, "B": 2, "C": 2, "D": 3, "E": 4, "F": 5, "G": 6, "T": 7} ) test["Cabin_category"] = test["Cabin_category"].fillna(0) # Cabin Grouping test["HasCabin"] = test["Cabin"].apply(lambda x: 0 if x is np.nan else 1) train.head() # # Filling in missing data train.isnull().sum() train["Age"].fillna(train.groupby("Status")["Age"].transform("median"), inplace=True) test["Age"].fillna(test.groupby("Status")["Age"].transform("median"), inplace=True) train["Fare"].fillna( train.groupby(["Pclass", "Parch", "SibSp"])["Fare"].median()[3][0][0], inplace=True ) test["Fare"].fillna( test.groupby(["Pclass", "Parch", 
"SibSp"])["Fare"].median()[3][0][0], inplace=True ) train["Embarked"].fillna("S", inplace=True) test["Embarked"].fillna("S", inplace=True) train.isnull().sum() train.isnull().sum() train.groupby("Status")["Age"].transform("median") # # More visualizations facet = sns.FacetGrid(train, hue="Survived", aspect=4) facet.map(sns.kdeplot, "Age", shade=True) facet.set(xlim=(0, train["Age"].max())) facet.add_legend() plt.show() facet = sns.FacetGrid(train, hue="Survived", aspect=4) facet.map(sns.kdeplot, "Age", shade=True) facet.set(xlim=(0, train["Age"].max())) facet.add_legend() plt.xlim(0, 20) facet = sns.FacetGrid(train, hue="Survived", aspect=4) facet.map(sns.kdeplot, "Age", shade=True) facet.set(xlim=(0, train["Age"].max())) facet.add_legend() plt.xlim(20, 30) facet = sns.FacetGrid(train, hue="Survived", aspect=4) facet.map(sns.kdeplot, "Age", shade=True) facet.set(xlim=(0, train["Age"].max())) facet.add_legend() plt.xlim(30, 40) facet = sns.FacetGrid(train, hue="Survived", aspect=4) facet.map(sns.kdeplot, "Age", shade=True) facet.set(xlim=(0, train["Age"].max())) facet.add_legend() plt.xlim(40, 60) train.head() # # Training our model and making predictions from sklearn.metrics import accuracy_score from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split y_full = train["Survived"] features = [ "Pclass", "Sex", "Age", "IsAlone", "FamilySize", "Status", "Embarked", "Fare", "Cabin_category", "HasCabin", ] X_full = pd.get_dummies(train[features]) X_test_full = pd.get_dummies(test[features]) X_train, X_valid, y_train, y_valid = train_test_split( X_full, y_full, train_size=0.7, test_size=0.3, random_state=42 ) rf_model = RandomForestClassifier( n_estimators=100, max_depth=3, max_features=0.9, random_state=3, oob_score=True ) rf_model.fit(X_train, y_train) rf_val_predictions = rf_model.predict(X_valid) feature_importances = pd.Series(rf_model.feature_importances_, X_full.columns) feature_importances.sort_values(inplace=True) feature_importances.plot(kind="barh", figsize=(7, 6)) # * **Scoring our model** rf_accuracy = accuracy_score(rf_val_predictions, y_valid) rf_accuracy rf_model.fit(X_full, y_full) predictions = rf_model.predict(X_test_full) # # Preparing and formatting our submissions output = pd.DataFrame({"PassengerId": test.PassengerId, "Survived": predictions}) output.to_csv("my_submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003792.ipynb
null
null
[{"Id": 69003792, "ScriptId": 18472411, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6890835, "CreationDate": "07/25/2021 17:59:50", "VersionNumber": 139.0, "Title": "titanic-kaggle-solution", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 201.0, "LinesInsertedFromPrevious": 6.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 195.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
2,174
0
2,174
2,174
69003465
# # Titanic - 1st Attempt # ## Read data import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns train_data = pd.read_csv("../input/titanic/train.csv") test_data = pd.read_csv("../input/titanic/test.csv") y = train_data.pop("Survived") # ## Data Visualization train_data.head() test_data.head() # ## Handle missing values train_data.isna().sum() test_data.isna().sum() train_data = train_data.fillna({"Age": train_data["Age"].median(), "Embarked": "S"}) test_data = test_data.fillna( {"Age": test_data["Age"].median(), "Fare": test_data["Fare"].median()} ) # ## Feature Engineering possible_drops = ["Name"] # Add new feature 'FamilyMembers' using SibSp and Parch. Adding 1 to count the person itself. train_data["FamilyMembers"] = train_data.SibSp + train_data.Parch + 1 test_data["FamilyMembers"] = test_data.SibSp + test_data.Parch + 1 # possible_drops.extend(['SibSp']) # Converting cabin feature in to has cabin, depending on whether the cabin data is available or not. train_data["HasCabin"] = train_data["Cabin"].apply( lambda x: 0 if type(x) == float else 1 ) test_data["HasCabin"] = test_data["Cabin"].apply(lambda x: 0 if type(x) == float else 1) possible_drops.append("Cabin") # TODO: try to convert fare into ranges. possible_drop.append("Fare") # from mlxtend.preprocessing import minmax_scaling # train_data['Fare'] = minmax_scaling(train_data['Fare'].to_nparray(), columns=[0]) # TODO: handle ticket feature possible_drops.append("Ticket") # ### Drop unnecessary columns possible_drops X = train_data.drop(possible_drops, axis=1) X_test = test_data.drop(possible_drops, axis=1) # ### Encode categorical data features = X.columns X = pd.get_dummies(X) X_test = pd.get_dummies(X_test) X.head() X_test.head() # ## Define the Random Forest Model from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score model = RandomForestClassifier(n_estimators=2000, max_depth=6, random_state=0) # ## Training model.fit(X, y) # ## Predictions predictions = model.predict(X_test) output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions}) output["Survived"].value_counts() # Save predictions output.to_csv("predictions.csv", index=False) print("Your submission was successfully saved!")
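# Editor's hedged aside (not part of the original notebook): one way to act on the
# "convert fare into ranges" TODO above. pd.qcut buckets Fare into quantile-based
# bands; deriving the band edges from the training fares and reusing them on the
# test fares keeps the two frames encoded consistently. The FareBand column name
# is illustrative.
import pandas as pd

# Four quantile-based bands fitted on the training data, keeping the bin edges
train_data["FareBand"], fare_edges = pd.qcut(
    train_data["Fare"], 4, labels=False, retbins=True
)
# Clip test fares into the training range, then reuse the same edges
test_fare_clipped = test_data["Fare"].clip(fare_edges[0], fare_edges[-1])
test_data["FareBand"] = pd.cut(
    test_fare_clipped, bins=fare_edges, labels=False, include_lowest=True
).astype(int)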
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003465.ipynb
null
null
[{"Id": 69003465, "ScriptId": 18805935, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7890050, "CreationDate": "07/25/2021 17:54:06", "VersionNumber": 4.0, "Title": "Titanic - 1", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 92.0, "LinesInsertedFromPrevious": 8.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 84.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
null
null
null
null
false
0
768
0
768
768