# #**FIRST DATAFRAME**
# New Code
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
# URL of the first page to scrape
url1 = "https://en.wikipedia.org/wiki/List_of_Falcon_9_and_Falcon_Heavy_launches_(2010%E2%80%932019)"
# Send a GET request to the URL and parse the HTML content using BeautifulSoup
response1 = requests.get(url1)
soup1 = BeautifulSoup(response1.content, "html.parser")
# Find the tables containing the launches data by searching for the tables with class "wikitable"
tables1 = soup1.find_all("table", {"class": "wikitable"})
# Create an empty list to store the DataFrames
df_list = []
# Use pandas to read the table data for each table in the first URL
for i in range(7): # loop through the first 7 tables
df = pd.read_html(str(tables1[i]))[0]
df_list.append(df) # add the DataFrame to the list
# Concatenate the DataFrames in the list into a single DataFrame and drop any duplicate rows based on the "Flight No." column
final_df1 = pd.concat(df_list)
final_df1 = final_df1.drop_duplicates(subset=["Flight No."], keep="first")
# cleaning
final_df1 = final_df1.replace(np.nan, "", regex=True)
# final_df1['Launch Sites'] = final_df1['Launch site'].astype(str) + ' ' + final_df1['Launchsite'].astype(str)
final_df1["Versions, Boosters"] = (
final_df1["Version, Booster [a]"].astype(str)
+ " "
+ final_df1["Version, Booster[a]"].astype(str)
)
final_df1 = final_df1.drop(["Version, Booster [a]", "Version, Booster[a]"], axis=1)
final_df1.rename(
columns={
"Date andtime (UTC)": "Date and Time (UTC)",
"Payload[b]": "Payload",
"Payload mass": "Payload Mass",
"Launchoutcome": "Launch Outcome",
"Boosterlanding": "Booster Landing",
},
inplace=True,
)
final_df1
final_df1.isna().sum()
# #**SECOND DATAFRAME**
############################# LINK 2
# URL of the second page to scrape
url2 = "https://en.wikipedia.org/wiki/List_of_Falcon_9_and_Falcon_Heavy_launches"
# Send a GET request to the URL and parse the HTML content using BeautifulSoup
response2 = requests.get(url2)
soup2 = BeautifulSoup(response2.content, "html.parser")
# Find the tables containing the launches data by searching for the tables with class "wikitable"
tables2 = soup2.find_all("table", {"class": "wikitable"})
# Create an empty list to store the DataFrames
df_list2 = []
# Use pandas to read the table data for each table in the second URL
for i in range(4): # loop through the first 4 tables
df = pd.read_html(str(tables2[i]))[0]
df_list2.append(df) # add the DataFrame to the list
# Concatenate the DataFrames in the list into a single DataFrame and drop any duplicate rows based on the "Flight No." column
final_df2 = pd.concat(df_list2)
final_df2 = final_df2.drop_duplicates(subset=["Flight No."], keep="first")
final_df2.rename(
columns={
"Date andtime (UTC)": "Date and Time (UTC)",
"Version, booster[b]": "Versions, Boosters",
"Launchsite": "Launch Sites",
"Payload[c]": "Payload",
"Payload mass": "Payload Mass",
"Launchoutcome": "Launch Outcome",
"Boosterlanding": "Booster Landing",
},
inplace=True,
)
# Print the final DataFrame
final_df2
final_df2.isna().sum()
# #**Final DataFrame**
############################# NEW DATAFRAME
# Concatenate the DataFrames obtained from both URLs into a single DataFrame
final_df = pd.concat([final_df1, final_df2])
############################# MANIPULATION
# Print the final DataFrame
final_df.to_csv("SpaceX Launches.csv", index=False)
final_df
final_df.isna().sum()
# #**Missing Chart**
import missingno as msno
msno.bar(final_df, figsize=(10, 2))
|
# First we have to import all of our essential packages
import numpy as np
import pandas as pd
import sqlite3 as sql
import plotly.express as px
import os
import matplotlib.pyplot as plt
# Get the directory name so that we can access the document
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv("/kaggle/input/craigslist-carstrucks-data/vehicles.csv", delimiter=",")
print(df.head(3))
# The dataset ships as a CSV rather than a SQLite file, so load the DataFrame into an
# in-memory SQLite database if SQL queries are needed later on.
connection = sql.connect(":memory:")
df.to_sql("vehicles", connection, if_exists="replace", index=False)
print("SQL database connected")
# **What percent of listings are Carvana listings?**
carvana_count = df["description"].str.contains("Carvana", case=False, na=False).sum()  # na=False treats missing descriptions as non-matches
total_count = len(df.index)
carvana_percentage = (carvana_count / total_count) * 100
print(f"Carvana is dominating {carvana_percentage:.2f}% of the listings.")
# * CATEGORIZE TYPES OF SALES: DEALERSHIP, CARVANA AND LIKE, PRIVATE?
# * What are the oldest cars
# * Percent as which for parts or restoring
# * Correlation: mileage and value (ML)
# * average price per region
# * map of supply
# * percentage of electric cars in each state and most
# * map of antique cars
# * map of wrecked cars
# * percentage of cars that get wrecked
# * price predictors
list(df)
manufacturer_counts = df.groupby("manufacturer").agg(
num_listings=("manufacturer", "size"), avg_price=("price", "median")
)
manufacturer_counts = manufacturer_counts.sort_values(
by="num_listings", ascending=False
)
manufacturer_counts
# create a bar chart of number of listings by manufacturer
plt.figure(figsize=(10, 6))
plt.bar(manufacturer_counts.index, manufacturer_counts["num_listings"])
plt.title("Number of Listings by Manufacturer")
plt.xlabel("Manufacturer")
plt.ylabel("Number of Listings")
plt.xticks(rotation=90)
plt.show()
# create a scatterplot of average price vs. number of listings by manufacturer
plt.figure(figsize=(10, 6))
plt.scatter(manufacturer_counts["num_listings"], manufacturer_counts["avg_price"])
plt.title("Average Price vs. Number of Listings by Manufacturer")
plt.xlabel("Number of Listings")
plt.ylabel("Average Price")
plt.show()
# Clearly there are some outliers here: there's no way the average price for a Volvo is $383,755, so we should remove the outliers for each manufacturer so they don't skew the data
# Defining a function to remove outliers using z-scores
def remove_outliers(df, col, threshold=3):
df["zscore"] = np.abs((df[col].mean()) / df[col].std())
return df[df["zscore"] <= threshold]
# Group by manufacturer and remove outliers for each group separately
manufacturer_clean = df.groupby("manufacturer").apply(remove_outliers, col="price")
manufacturer_clean
# Recompute the summary on the outlier-free data rather than the original DataFrame
manufacturer_counts_clean = manufacturer_clean.reset_index(drop=True).groupby(
    "manufacturer"
).agg(num_listings=("manufacturer", "size"), avg_price=("price", "median"))
manufacturer_counts_clean = manufacturer_counts_clean.sort_values(
by="num_listings", ascending=False
)
manufacturer_counts_clean
# Trying to categorize listings as online dealership, physical dealership, or private party based on keywords in the description
online_dealerships = ["carvana", "vroom", "shift", "carmax"]  # lowercase so they match the lowercased description
physical_dealerships = [
"finance",
"call",
"guaranteed",
"inspection",
"test drive",
"call us today",
"auction",
"visit our",
"automotive",
]
def categorize_description(description):
if pd.isna(description):
return "Private party"
elif any(keyword in description.lower() for keyword in online_dealerships):
return "Online dealership"
elif any(keyword in description.lower() for keyword in physical_dealerships):
return "Physical dealership"
else:
return "Private party"
# apply the function to each row of the DataFrame
df["category"] = df["description"].apply(categorize_description)
# calculate the percentage of descriptions in each category
category_counts = df["category"].value_counts(normalize=True) * 100
print(category_counts)
import matplotlib.pyplot as plt
# create a bar chart
category_counts.plot(kind="bar")
# set the chart title and axis labels
plt.title("Percentage of Car Listings by Category")
plt.xlabel("Category")
plt.ylabel("Percentage")
# display the chart
plt.show()
df.sort_values(by="year", ascending=1).head(10)
exclude_mask = (
    df["description"].str.lower().str.contains("cash for", na=False)
    | df["description"].str.lower().str.contains("provide photos", na=False)
    | df["description"].str.lower().str.contains("buying", na=False)
)
# create a new DataFrame with the excluded rows removed
df_excluded = df[~exclude_mask].copy()
# sort the remaining rows by year and show the top 5 oldest cars
oldest_cars = df_excluded.sort_values(by="year").head(5)
print(oldest_cars)
|
# ## Dependencies and imports
# In this kernel used github repos [efficientdet-pytorch](https://github.com/rwightman/efficientdet-pytorch) and [pytorch-image-models](https://github.com/rwightman/pytorch-image-models) by [@rwightman](https://www.kaggle.com/rwightman). Don't forget add stars ;)
import torch
import os
from datetime import datetime
import time
import random
import cv2
import pandas as pd
import numpy as np
import albumentations as A
import matplotlib.pyplot as plt
from albumentations.pytorch.transforms import ToTensorV2
from sklearn.model_selection import StratifiedKFold, train_test_split
from torch.utils.data import Dataset, DataLoader, Subset
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from glob import glob
from PIL import Image
import sys
sys.path.append("/kaggle/input/mycode/")
sys.path.append("/kaggle/input/mycode/config")
from efficientdet import EfficientDet, HeadNet
from bench import DetBenchTrain
from model_config import get_efficientdet_config
from train_config import config_train
from config_utils import set_config_writeable, set_config_readonly
def collate_fn(batch):
return tuple(zip(*batch))
def get_transforms():
return A.Compose(
[
A.Resize(height=640, width=640, p=1.0),
ToTensorV2(p=1.0),
],
p=1.0,
bbox_params=A.BboxParams(
format="pascal_voc", min_area=0, min_visibility=0, label_fields=["labels"]
),
)
class JamurDataset(Dataset):
def __init__(self, directory, dataframe, transform=None):
self.data = pd.read_csv(dataframe)
self.directory = directory
self.image_names = pd.unique(self.data["filename"]).tolist()
self.transform = transform
def __len__(self):
return len(self.image_names)
def __getitem__(self, index):
image_name = self.image_names[index]
image = Image.open(f"{self.directory}/{image_name}").convert("RGB")
w, h = image.size
image = np.array(image, dtype=np.float32)
image /= 255.0
boxes = self.data[self.data["filename"] == image_name][
["xmin", "ymin", "xmax", "ymax"]
].values
labels = self.data[self.data["filename"] == image_name][["class"]].values
temp = self.transform(
**{
"image": image,
"bboxes": boxes,
"labels": labels,
}
)
image = temp["image"]
temp["bboxes"] = np.array(temp["bboxes"])
temp["bboxes"][:, [0, 1, 2, 3]] = temp["bboxes"][:, [1, 0, 3, 2]]
labels = np.array(labels).flatten()
target = {
"bboxes": torch.as_tensor(temp["bboxes"], dtype=torch.float32),
"labels": torch.as_tensor(labels, dtype=torch.int64),
"image_id": torch.as_tensor([index]),
"img_size": torch.as_tensor([h, w]),
"img_scale": torch.as_tensor([1.0]),
}
return image, target, index
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
scheduler_params = dict(
mode="min",
factor=0.5,
patience=1,
verbose=False,
threshold=0.0001,
threshold_mode="abs",
cooldown=0,
min_lr=1e-8,
eps=1e-08,
)
class Fitter:
def __init__(self, model, device, config):
self.config = config
self.epoch = 0
self.base_dir = config.folder
self.log_path = f"{self.base_dir}/log.txt"
self.best_summary_loss = 10**5
self.model = model
self.device = device
self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=config.lr)
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, **scheduler_params
)
self.train_history = []
self.valid_history = []
def fit(self, train_loader, validation_loader):
for n in range(self.config.n_epochs):
if self.config.verbose:
lr = self.optimizer.param_groups[0]["lr"]
timestamp = datetime.utcnow().isoformat()
self.log(f"\n{timestamp}\nLR: {lr}")
t = time.time()
summary_train_loss = self.train_one_epoch(train_loader)
self.train_history.append(summary_train_loss.avg)
self.log(
f"[RESULT]: Train. Epoch: {self.epoch}, summary_loss: {summary_train_loss.avg:.5f}, time: {(time.time() - t):.5f}"
)
self.save(f"{self.base_dir}/last-checkpoint.bin")
t = time.time()
summary_valid_loss = self.validation(validation_loader)
self.valid_history.append(summary_valid_loss.avg)
self.log(
f"[RESULT]: Val. Epoch: {self.epoch}, summary_loss: {summary_valid_loss.avg:.5f}, time: {(time.time() - t):.5f}"
)
if summary_valid_loss.avg < self.best_summary_loss:
self.best_summary_loss = summary_valid_loss.avg
self.model.eval()
self.save(
f"{self.base_dir}/best-checkpoint-{str(self.epoch).zfill(3)}epoch.bin"
)
for path in sorted(glob(f"{self.base_dir}/best-checkpoint-*epoch.bin"))[
:-3
]:
os.remove(path)
self.scheduler.step(metrics=summary_valid_loss.avg)
self.epoch += 1
def train_one_epoch(self, train_loader):
self.model.train()
summary_loss = AverageMeter()
t = time.time()
for step, (images, targets, image_ids) in enumerate(train_loader):
if self.config.verbose:
if step % self.config.verbose_step == 0:
print(
f"Train Step {step}/{len(train_loader)}, "
+ f"summary_loss: {summary_loss.avg:.5f}, "
+ f"time: {(time.time() - t):.5f}",
end="\r",
)
images = torch.stack(images)
images = images.to(self.device).float()
batch_size = images.shape[0]
boxes = [target["bboxes"].to(self.device).float() for target in targets]
labels = [target["labels"].to(self.device).float() for target in targets]
size = [target["img_size"] for target in targets]
scales = [target["img_scale"].to(self.device).float() for target in targets]
target_res = {}
target_res["bbox"] = boxes
target_res["cls"] = labels
target_res["img_size"] = size
target_res["img_scale"] = scales
self.optimizer.zero_grad()
outputs = self.model(images, target_res)
loss = outputs["loss"]
loss.backward()
summary_loss.update(loss.detach().item(), batch_size)
self.optimizer.step()
return summary_loss
def validation(self, val_loader):
self.model.eval()
summary_loss = AverageMeter()
t = time.time()
for step, (images, targets, image_ids) in enumerate(val_loader):
if self.config.verbose:
if step % self.config.verbose_step == 0:
print(
f"Val Step {step}/{len(val_loader)}, "
+ f"summary_loss: {summary_loss.avg:.5f}, "
+ f"time: {(time.time() - t):.5f}",
end="\r",
)
with torch.no_grad():
images = torch.stack(images)
batch_size = images.shape[0]
images = images.to(self.device).float()
boxes = [target["bboxes"].to(self.device).float() for target in targets]
labels = [
target["labels"].to(self.device).float() for target in targets
]
size = [target["img_size"] for target in targets]
scales = [
target["img_scale"].to(self.device).float() for target in targets
]
target_res = {}
target_res["bbox"] = boxes
target_res["cls"] = labels
target_res["img_size"] = size
target_res["img_scale"] = scales
outputs = self.model(images, target_res)
loss = outputs["loss"]
summary_loss.update(loss.detach().item(), batch_size)
return summary_loss
def save(self, path):
self.model.eval()
torch.save(
{
"model_state_dict": self.model.model.state_dict(),
"optimizer_state_dict": self.optimizer.state_dict(),
"best_summary_loss": self.best_summary_loss,
"epoch": self.epoch,
"train_history": self.train_history,
"valid_history": self.valid_history,
},
path,
)
def load(self, path):
checkpoint = torch.load(path)
self.model.model.load_state_dict(checkpoint["model_state_dict"])
self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
self.best_summary_loss = checkpoint["best_summary_loss"]
self.epoch = checkpoint["epoch"] + 1
self.train_history = checkpoint["train_history"]
self.valid_history = checkpoint["valid_history"]
def log(self, message):
if self.config.verbose:
print(message)
with open(self.log_path, "a+") as logger:
logger.write(f"{message}\n")
def get_efficientdet(nama):
config = get_efficientdet_config(nama)
set_config_writeable(config)
config.num_classes = 8
config.image_size = (640, 640)
set_config_readonly(config)
net = EfficientDet(config, pretrained_backbone=True)
net.class_net = HeadNet(config, num_outputs=config.num_classes)
return DetBenchTrain(net, config)
device = torch.device("cuda")
torch.cuda.empty_cache()
directory = "/kaggle/input/augfaqih/Gabungan/Gabungan"
dataframe_path = "/kaggle/input/augfaqih/newdata.csv"
dataset = JamurDataset(directory, dataframe_path, transform=get_transforms())
print(dataset.__len__())
df = pd.read_csv(dataframe_path)
image_names = pd.unique(df["filename"]).tolist()
temp = []
for name in image_names:
labels = df[df["filename"] == name][["class"]].values
labels = np.array(labels).flatten()
temp.append(labels[0])
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
train_indices, test_indices, _, _ = train_test_split(
range(dataset.__len__()), temp, stratify=temp, test_size=0.1, random_state=42
)
newtest_indices = []
test_idx = []
for i in test_indices:
if image_names[i].find("New") == -1:
newtest_indices.append(i)
else:
test_idx.append(i)
print(len(test_idx))
newtrain_indices = []
train_idx = []
for i in train_indices:
flag = False
if image_names[i].find("New") != -1:
for j in newtest_indices:
if image_names[i].find(str(image_names[j])) != -1:
flag = True
if flag == True:
train_idx.append(i)
else:
newtrain_indices.append(i)
print(len(newtrain_indices))
for i in test_idx:
newtrain_indices.append(i)
print(len(newtrain_indices))
train_set = Subset(dataset, newtrain_indices)
validation_set = Subset(dataset, newtest_indices)
training_config = config_train()
set_config_writeable(training_config)
training_config.n_epochs = 30
training_config.batch_size = 8
training_config.lr = 0.001
set_config_readonly(training_config)
train_loader = DataLoader(
train_set,
batch_size=training_config.batch_size,
pin_memory=False,
drop_last=True,
num_workers=training_config.num_workers,
collate_fn=collate_fn,
)
validation_loader = DataLoader(
validation_set,
batch_size=training_config.batch_size,
num_workers=training_config.num_workers,
shuffle=False,
pin_memory=False,
collate_fn=collate_fn,
)
model = get_efficientdet("tf_efficientdet_d0")
model.to(device)
fitter = Fitter(model=model, device=device, config=training_config)
fitter.fit(train_loader, validation_loader)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
us_covids = "/kaggle/input/us-coviddatasets-2020to2023/us_covid_deaths.csv"
"""
data cleans
only allowed
->
"""
udoc = pd.read_csv(us_covids)
# collect all deaths
x_deaths = udoc[
(udoc["Age Group"] == "All Ages")
& (udoc["Sex"] == "All Sexes")
& (udoc["Start Date"] == "01/01/2020")
& (udoc["End Date"] == "04/01/2023")
][1:][["State", "COVID-19 Deaths"]].values
collect_only_need = {y: udoc.columns[y] for y in range(0, len(udoc.columns))}
all_columns = set(collect_only_need.keys())
del_columns = set([0, 3, 4, 5, 10, 15])
udocz = [collect_only_need[xx] for xx in list(all_columns.difference(del_columns))]
udoc = udoc[udocz]
# just drop the null values
udoc.dropna(inplace=True)
udoc.reset_index(drop=True, inplace=True)
# drop first row
udoc = udoc.iloc[1:, :]
print(" information about the null values")
print(udoc.isnull().sum().to_markdown())
import datetime
from dateutil import relativedelta
storethat = []
days = []
for x in range(1, len(udoc) + 1):
    # Dates are in MM/DD/YYYY format, so reversing the split gives [year, day, month]
    start_year, start_day, start_month = (int(v) for v in udoc["Start Date"][x].split("/")[::-1])
    end_year, end_day, end_month = (int(v) for v in udoc["End Date"][x].split("/")[::-1])
    s = datetime.datetime(start_year, start_month, start_day)
    l = datetime.datetime(end_year, end_month, end_day)
mm = relativedelta.relativedelta(l, s)
days.append(
[
str(mm.years)
+ " years & "
+ str(mm.months)
+ "months , "
+ str(mm.days)
+ " days"
][0]
)
storethat.append([s, l])
okey = {}
def collect_deaths(deaths):
okey = {}
# collect the columns
thecols = np.unique(udoc["State"].values)
for xx in thecols:
# only the columns
labels = dict(
udoc[(udoc["State"] == xx) & (udoc["Sex"] != "All Sexes")][[deaths]].sum()
)
okey[xx] = labels
return pd.DataFrame(okey)
dis = ["Pneumonia Deaths", "Pneumonia and COVID-19 Deaths", "COVID-19 Deaths"]
fulls = [collect_deaths(dis[x]) for x in range(0, len(dis))]
# usa data codes for us states
l = "/kaggle/input/us-coviddatasets-2020to2023/data.csv"
rema = {}
for xx, yy in dict(pd.read_csv(l)[["state", "code"]].values).items():
rema[xx] = yy
rem = {}
for xx, yy in dict(pd.read_csv(l)[["state", "code"]].values).items():
rem[yy] = xx
deaths = pd.DataFrame(pd.concat(fulls))
deaths.columns = deaths.columns.map(rema)
stzz = deaths.columns
deaths = deaths.T
deaths["code"] = stzz
deaths.dropna(inplace=True)
deaths.reset_index(drop=True, inplace=True)
deaths = deaths.set_index("code")
udoc["ym"] = days
# # complete data_analysis for covid19 / other deaths - united-states (2020 to 2023)
# data analysis
import plotly.express as px
# 1
fig = px.bar(
deaths,
labels={
"variable": "affected by",
"code": "state",
"deaths": "counts",
"value": "deaths",
},
height=400,
title="real-time US-States deaths by covid 19 / pneumonia and covid19"
+ "<br>"
+ "data 2020 - 2023",
)
fig.show()
# 2
titles = "List all us-states regions deaths by " + "<br>" + str(dis)
least_5 = deaths.sort_values(by=dis, ascending=False)
fig = px.choropleth(
locations=least_5.index,
locationmode="USA-states",
color=least_5.index,
scope="usa",
width=800,
height=500,
title=titles,
labels={"color": "affected US-STATES", "locations": "code"},
)
fig.show()
# 3
top_5 = deaths.sort_values(by=dis, ascending=False)[0:6]
top_6_states = top_5.index
titles = (
"Top 6 US-States were most deaths occured on"
+ "<br>"
+ str(" | ".join(top_6_states.to_list()))
)
fig = px.choropleth(
locations=top_6_states,
locationmode="USA-states",
scope="usa",
width=800,
height=500,
title=titles,
labels={"color": "highly affected US-STATES"},
color=top_6_states,
)
fig.show()
ca = []
ya = []
xx = 0
udoc = udoc[["State", "Sex", "ym"] + dis]
la = dict(
udoc[
(udoc["State"] == rem[top_6_states[xx]])
& (udoc["Sex"] == "Male")
& (udoc["ym"] == "3 years & 3months , 0 days")
].sum()
)
lb = dict(
udoc[
(udoc["State"] == rem[top_6_states[xx]])
& (udoc["Sex"] == "Male")
& (udoc["ym"] == "0 years & 0months , 29 days")
].sum()
)
lc = dict(
udoc[
(udoc["State"] == rem[top_6_states[xx]])
& (udoc["Sex"] == "Male")
& (udoc["ym"] == "0 years & 0months , 28 days")
].sum()
)
ld = dict(
udoc[
(udoc["State"] == rem[top_6_states[xx]])
& (udoc["Sex"] == "Female")
& (udoc["ym"] == "3 years & 3months , 0 days")
].sum()
)
le = dict(
udoc[
(udoc["State"] == rem[top_6_states[xx]])
& (udoc["Sex"] == "Female")
& (udoc["ym"] == "0 years & 0months , 29 days")
].sum()
)
lf = dict(
udoc[
(udoc["State"] == rem[top_6_states[xx]])
& (udoc["Sex"] == "Female")
& (udoc["ym"] == "0 years & 0months , 28 days")
].sum()
)
for xx in [la, lb, lc, ld, le, lf]:
ca.append(list(xx.values())[-3:])
year_month_days = [
"3 years & 3months , 0 days",
"0 years & 0months , 29 days",
"0 years & 0months , 28 days",
] * 2
dat_dis = pd.DataFrame(ca)
dat_dis.columns = dat_dis.columns.map({0: dis[0], 1: dis[1], 2: dis[2]})
dat_dis["timeline"] = year_month_days
high_death = dat_dis
high_death["sex"] = ["male", "male", "male", "female", "female", "female"]
Male = (
np.array([high_death.stack()[i].values[:3] for i in range(0, len(high_death) - 3)])
.flatten()
.sum()
/ 39200000
* 10
)
Female = (
np.array([high_death.stack()[i].values[:3] for i in range(len(high_death) - 3, 6)])
.flatten()
.sum()
/ 39200000
* 10
)
gender_labels = ["Male", "Female"]
# 4
po = px.bar(
pd.DataFrame({"male": [Male], "female": [Female]}),
width=700,
height=500,
labels={
"gender": "",
"variable": "sex",
"value": "deaths based by population",
"index": "gender",
},
title=rem[top_6_states[0]]
+ " state -"
+ "gender wise deaths"
+ "<br>"
+ "infected by "
+ " | ".join(dis)
+ "<br>"
+ "AFFECTED MOST => "
+ str(gender_labels[np.argmax([Male, Female])]),
)
po.show()
# 5
y1 = udoc[(udoc["State"] == rem[top_6_states[0]]) & (udoc["Sex"] == "All Sexes")].iloc[
:, -3:
]
y2 = udoc[(udoc["State"] == rem[top_6_states[1]]) & (udoc["Sex"] == "All Sexes")].iloc[
:, -3:
]
y3 = udoc[(udoc["State"] == rem[top_6_states[2]]) & (udoc["Sex"] == "All Sexes")].iloc[
:, -3:
]
xz = pd.DataFrame(
{
"US_STATE": [rem[top_6_states[0]], rem[top_6_states[1]], rem[top_6_states[2]]],
"Total_Deaths": [
y1.describe(include="all")["COVID-19 Deaths"]["max"],
y2.describe(include="all")["COVID-19 Deaths"]["max"],
y3.describe(include="all")["COVID-19 Deaths"]["max"],
],
}
)
# 6
yo = px.bar(
xz,
x="US_STATE",
y="Total_Deaths",
color="US_STATE",
title="TOP 3 US-STATES TOTAL DEATHS (COVID-19)" + "<br>" + "FROM 2020 TO 2023",
width=800,
height=300,
)
yo.show()
# # Best place to live until covid stops
bestplace_for_stay_us_few = deaths.sort_values(by=dis, ascending=True)[:3]
bestplace_for_stay_us_few["best place"] = bestplace_for_stay_us_few.index
fg = px.choropleth(
locations=bestplace_for_stay_us_few["best place"],
locationmode="USA-states",
scope="usa",
width=800,
height=500,
color=bestplace_for_stay_us_few["best place"],
labels={"color": "state"},
title="3 best place for live until covid stops on united states"
+ "<br>"
+ " | ".join(bestplace_for_stay_us_few.index),
)
fg.show()
# # All United States => Total Deaths
import plotly.express as px
jj = pd.DataFrame(x_deaths, columns=["state", "deaths"])
xf = px.line(
jj,
x="state",
y="deaths",
title="Live death status of all United States",
labels={"deaths": "Total Deaths", "state": "US-States"},
)
xf.show()
# # For assumptions of per day covid deaths on ALL US-STATES
# ## (3 years data => 2020 to 2023)
import plotly.graph_objects as go
jj = pd.DataFrame(x_deaths, columns=["state", "deaths"])
jj["peryear_deaths"] = jj["deaths"] / 1099
fig = go.Figure()
fig.add_trace(go.Scatter(x=jj["state"], y=jj["peryear_deaths"], mode="lines"))
fig.update_layout(
title="For assumptions per day covid deaths (3 years data => 2020 to 2023)"
+ "<br>"
+ "on all united states"
)
fig.show()
xu = np.argmax(
udoc[
(udoc["State"] == rem[top_6_states[0]])
& (udoc["Sex"] == "All Sexes")
& (udoc["ym"] == "3 years & 3months , 0 days")
]
.max()
.values[-3:]
)
title = "in 2020 to 2023, high deaths caused by " + str(dis[xu])
print(title)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
df = pd.read_csv("/kaggle/input/spam-email/spam.csv")
df.head()
# checking the size of the dataset
df.shape
# checking for null values
df.isnull().sum()
df.duplicated().sum()
df = df.drop_duplicates(keep="first")
# Label spam mail as 0 and ham mail as 1
df["Category"] = df["Category"].replace({"spam": 0, "ham": 1})
df.head()
# Separating the data as texts and labels
x = df.Message
y = df.Category
print(x)
print(y)
# Splitting the data into train and test data
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=2, test_size=0.2)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
# Feature Extraction
# Transform the text data to feature vectors that can be used as input to the logistic regression
feature_extraction = TfidfVectorizer(min_df=1, stop_words="english", lowercase=True)
x_train_features = feature_extraction.fit_transform(x_train)
x_test_feature = feature_extraction.transform(x_test)
print(x_train_features)
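# A tiny illustration (toy messages, not from the dataset) of what TfidfVectorizer
# produces: each message becomes a vector of TF-IDF weights over the learned vocabulary.
toy_vectorizer = TfidfVectorizer(stop_words="english", lowercase=True)
toy_matrix = toy_vectorizer.fit_transform(["free prize winner claim now", "meet me for lunch tomorrow"])
print(toy_vectorizer.get_feature_names_out())
print(toy_matrix.toarray())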
# Convert Y_train and Y_test values as integers
y_train = y_train.astype("int")
y_test = y_test.astype("int")
# Training the model
# Logistic Regression
model = LogisticRegression()
# training the Logistic Regression model with the training data
model.fit(x_train_features, y_train)
# Evaluating the trained model
# prediction on training data
prediction_on_training_data = model.predict(x_train_features)
accuracy_on_training_data = accuracy_score(y_train, prediction_on_training_data)
print("Accuracy on training data is", accuracy_on_training_data)
# prediction on test data
prediction_on_test_data = model.predict(x_test_feature)
accuracy_on_test_data = accuracy_score(y_test, prediction_on_test_data)
print("Accuracy on test data is", accuracy_on_test_data)
# Building a predictive system
input_mail = ["Go until jurong point, crazy.. Available only"]
# convert text to feature vectors
input_data_features = feature_extraction.transform(input_mail)
# making prediction
prediction = model.predict(input_data_features)
print(prediction)
if prediction[0] == 1:
print("Ham mail")
else:
print("Spaim mail")
|
# # Import Libraries
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.layers import TextVectorization
import re, string
from tensorflow.keras.layers import LSTM, Dense, Embedding, Dropout, LayerNormalization
df = pd.read_csv(
"/kaggle/input/simple-dialogs-for-chatbot/dialogs.txt",
sep="\t",
names=["question", "answer"],
)
print(f"Dataframe size: {len(df)}")
df.head()
# # Data Preprocessing
# ## Data Visualization
df["question tokens"] = df["question"].apply(lambda x: len(x.split()))
df["answer tokens"] = df["answer"].apply(lambda x: len(x.split()))
plt.style.use("fivethirtyeight")
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(20, 5))
sns.set_palette("Set2")
sns.histplot(x=df["question tokens"], data=df, kde=True, ax=ax[0])
sns.histplot(x=df["answer tokens"], data=df, kde=True, ax=ax[1])
sns.jointplot(
x="question tokens",
y="answer tokens",
data=df,
kind="kde",
fill=True,
cmap="YlGnBu",
)
plt.show()
# ## Text Cleaning
def clean_text(text):
text = re.sub("-", " ", text.lower())
text = re.sub("[.]", " . ", text)
text = re.sub("[1]", " 1 ", text)
text = re.sub("[2]", " 2 ", text)
text = re.sub("[3]", " 3 ", text)
text = re.sub("[4]", " 4 ", text)
text = re.sub("[5]", " 5 ", text)
text = re.sub("[6]", " 6 ", text)
text = re.sub("[7]", " 7 ", text)
text = re.sub("[8]", " 8 ", text)
text = re.sub("[9]", " 9 ", text)
text = re.sub("[0]", " 0 ", text)
text = re.sub("[,]", " , ", text)
text = re.sub("[?]", " ? ", text)
text = re.sub("[!]", " ! ", text)
text = re.sub("[$]", " $ ", text)
text = re.sub("[&]", " & ", text)
text = re.sub("[/]", " / ", text)
text = re.sub("[:]", " : ", text)
text = re.sub("[;]", " ; ", text)
text = re.sub("[*]", " * ", text)
text = re.sub("[']", " ' ", text)
text = re.sub('["]', ' " ', text)
text = re.sub("\t", " ", text)
return text
df.drop(columns=["answer tokens", "question tokens"], axis=1, inplace=True)
df["encoder_inputs"] = df["question"].apply(clean_text)
df["decoder_targets"] = df["answer"].apply(clean_text) + " <end>"
df["decoder_inputs"] = "<start> " + df["answer"].apply(clean_text) + " <end>"
df.head(10)
df["encoder input tokens"] = df["encoder_inputs"].apply(lambda x: len(x.split()))
df["decoder input tokens"] = df["decoder_inputs"].apply(lambda x: len(x.split()))
df["decoder target tokens"] = df["decoder_targets"].apply(lambda x: len(x.split()))
plt.style.use("fivethirtyeight")
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(20, 5))
sns.set_palette("Set2")
sns.histplot(x=df["encoder input tokens"], data=df, kde=True, ax=ax[0])
sns.histplot(x=df["decoder input tokens"], data=df, kde=True, ax=ax[1])
sns.histplot(x=df["decoder target tokens"], data=df, kde=True, ax=ax[2])
sns.jointplot(
x="encoder input tokens",
y="decoder target tokens",
data=df,
kind="kde",
fill=True,
cmap="YlGnBu",
)
plt.show()
print(
f"After preprocessing: {' '.join(df[df['encoder input tokens'].max()==df['encoder input tokens']]['encoder_inputs'].values.tolist())}"
)
print(f"Max encoder input length: {df['encoder input tokens'].max()}")
print(f"Max decoder input length: {df['decoder input tokens'].max()}")
print(f"Max decoder target length: {df['decoder target tokens'].max()}")
df.drop(
columns=[
"question",
"answer",
"encoder input tokens",
"decoder input tokens",
"decoder target tokens",
],
axis=1,
inplace=True,
)
params = {
"vocab_size": 2500,
"max_sequence_length": 30,
"learning_rate": 0.008,
"batch_size": 149,
"lstm_cells": 256,
"embedding_dim": 256,
"buffer_size": 10000,
}
learning_rate = params["learning_rate"]
batch_size = params["batch_size"]
embedding_dim = params["embedding_dim"]
lstm_cells = params["lstm_cells"]
vocab_size = params["vocab_size"]
buffer_size = params["buffer_size"]
max_sequence_length = params["max_sequence_length"]
df.head(10)
# ## Tokenization
vectorize_layer = TextVectorization(
max_tokens=vocab_size,
standardize=None,
output_mode="int",
output_sequence_length=max_sequence_length,
)
vectorize_layer.adapt(
df["encoder_inputs"] + " " + df["decoder_targets"] + " <start> <end>"
)
vocab_size = len(vectorize_layer.get_vocabulary())
print(f"Vocab size: {len(vectorize_layer.get_vocabulary())}")
print(f"{vectorize_layer.get_vocabulary()[:12]}")
def sequences2ids(sequence):
return vectorize_layer(sequence)
def ids2sequences(ids):
decode = ""
if type(ids) == int:
ids = [ids]
for id in ids:
decode += vectorize_layer.get_vocabulary()[id] + " "
return decode
x = sequences2ids(df["encoder_inputs"])
yd = sequences2ids(df["decoder_inputs"])
y = sequences2ids(df["decoder_targets"])
print(f"Question sentence: hi , how are you ?")
print(f'Question to tokens: {sequences2ids("hi , how are you ?")[:10]}')
print(f"Encoder input shape: {x.shape}")
print(f"Decoder input shape: {yd.shape}")
print(f"Decoder target shape: {y.shape}")
print(f"Encoder input: {x[0][:12]} ...")
print(
f"Decoder input: {yd[0][:12]} ..."
) # shifted by one time step of the target as input to decoder is the output of the previous timestep
print(f"Decoder target: {y[0][:12]} ...")
data = tf.data.Dataset.from_tensor_slices((x, yd, y))
# Shuffle once without reshuffling each iteration so the take/skip split below gives disjoint train and validation sets
data = data.shuffle(buffer_size, reshuffle_each_iteration=False)
train_data = data.take(int(0.9 * len(data)))
train_data = train_data.cache()
train_data = train_data.shuffle(buffer_size)
train_data = train_data.batch(batch_size)
train_data = train_data.prefetch(tf.data.AUTOTUNE)
train_data_iterator = train_data.as_numpy_iterator()
val_data = data.skip(int(0.9 * len(data))).take(int(0.1 * len(data)))
val_data = val_data.batch(batch_size)
val_data = val_data.prefetch(tf.data.AUTOTUNE)
_ = train_data_iterator.next()
print(f"Number of train batches: {len(train_data)}")
print(f"Number of training data: {len(train_data)*batch_size}")
print(f"Number of validation batches: {len(val_data)}")
print(f"Number of validation data: {len(val_data)*batch_size}")
print(f"Encoder Input shape (with batches): {_[0].shape}")
print(f"Decoder Input shape (with batches): {_[1].shape}")
print(f"Target Output shape (with batches): {_[2].shape}")
# # Build Models
# ## Build Encoder
class Encoder(tf.keras.models.Model):
def __init__(self, units, embedding_dim, vocab_size, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.units = units
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
self.embedding = Embedding(
vocab_size,
embedding_dim,
name="encoder_embedding",
mask_zero=True,
embeddings_initializer=tf.keras.initializers.GlorotNormal(),
)
self.normalize = LayerNormalization()
self.lstm = LSTM(
units,
dropout=0.4,
return_state=True,
return_sequences=True,
name="encoder_lstm",
kernel_initializer=tf.keras.initializers.GlorotNormal(),
)
def call(self, encoder_inputs):
self.inputs = encoder_inputs
x = self.embedding(encoder_inputs)
x = self.normalize(x)
x = Dropout(0.4)(x)
encoder_outputs, encoder_state_h, encoder_state_c = self.lstm(x)
self.outputs = [encoder_state_h, encoder_state_c]
return encoder_state_h, encoder_state_c
encoder = Encoder(lstm_cells, embedding_dim, vocab_size, name="encoder")
encoder.call(_[0])
# ## Build Decoder
class Decoder(tf.keras.models.Model):
def __init__(self, units, embedding_dim, vocab_size, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.units = units
self.embedding_dim = embedding_dim
self.vocab_size = vocab_size
self.embedding = Embedding(
vocab_size,
embedding_dim,
name="decoder_embedding",
mask_zero=True,
embeddings_initializer=tf.keras.initializers.HeNormal(),
)
self.normalize = LayerNormalization()
self.lstm = LSTM(
units,
dropout=0.4,
return_state=True,
return_sequences=True,
name="decoder_lstm",
kernel_initializer=tf.keras.initializers.HeNormal(),
)
self.fc = Dense(
vocab_size,
activation="softmax",
name="decoder_dense",
kernel_initializer=tf.keras.initializers.HeNormal(),
)
def call(self, decoder_inputs, encoder_states):
x = self.embedding(decoder_inputs)
x = self.normalize(x)
x = Dropout(0.4)(x)
x, decoder_state_h, decoder_state_c = self.lstm(x, initial_state=encoder_states)
x = self.normalize(x)
x = Dropout(0.4)(x)
return self.fc(x)
decoder = Decoder(lstm_cells, embedding_dim, vocab_size, name="decoder")
decoder(_[1][:1], encoder(_[0][:1]))
# ## Build Training Model
class ChatBotTrainer(tf.keras.models.Model):
def __init__(self, encoder, decoder, *args, **kwargs):
super().__init__(*args, **kwargs)
self.encoder = encoder
self.decoder = decoder
def loss_fn(self, y_true, y_pred):
loss = self.loss(y_true, y_pred)
mask = tf.math.logical_not(tf.math.equal(y_true, 0))
mask = tf.cast(mask, dtype=loss.dtype)
loss *= mask
return tf.reduce_mean(loss)
def accuracy_fn(self, y_true, y_pred):
pred_values = tf.cast(tf.argmax(y_pred, axis=-1), dtype="int64")
correct = tf.cast(tf.equal(y_true, pred_values), dtype="float64")
mask = tf.cast(tf.greater(y_true, 0), dtype="float64")
n_correct = tf.keras.backend.sum(mask * correct)
n_total = tf.keras.backend.sum(mask)
return n_correct / n_total
def call(self, inputs):
encoder_inputs, decoder_inputs = inputs
encoder_states = self.encoder(encoder_inputs)
return self.decoder(decoder_inputs, encoder_states)
def train_step(self, batch):
encoder_inputs, decoder_inputs, y = batch
with tf.GradientTape() as tape:
encoder_states = self.encoder(encoder_inputs, training=True)
y_pred = self.decoder(decoder_inputs, encoder_states, training=True)
loss = self.loss_fn(y, y_pred)
acc = self.accuracy_fn(y, y_pred)
variables = self.encoder.trainable_variables + self.decoder.trainable_variables
grads = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(grads, variables))
metrics = {"loss": loss, "accuracy": acc}
return metrics
def test_step(self, batch):
encoder_inputs, decoder_inputs, y = batch
        # Run in inference mode during evaluation so dropout is disabled
        encoder_states = self.encoder(encoder_inputs, training=False)
        y_pred = self.decoder(decoder_inputs, encoder_states, training=False)
loss = self.loss_fn(y, y_pred)
acc = self.accuracy_fn(y, y_pred)
metrics = {"loss": loss, "accuracy": acc}
return metrics
model = ChatBotTrainer(encoder, decoder, name="chatbot_trainer")
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
weighted_metrics=["loss", "accuracy"],
)
model(_[:2])
# ## Train Model
history = model.fit(
train_data,
epochs=100,
validation_data=val_data,
callbacks=[
tf.keras.callbacks.TensorBoard(log_dir="logs"),
tf.keras.callbacks.ModelCheckpoint("ckpt", verbose=1, save_best_only=True),
],
)
# # Visualize Metrics
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(20, 5))
ax[0].plot(history.history["loss"], label="loss", c="red")
ax[0].plot(history.history["val_loss"], label="val_loss", c="blue")
ax[0].set_xlabel("Epochs")
ax[1].set_xlabel("Epochs")
ax[0].set_ylabel("Loss")
ax[1].set_ylabel("Accuracy")
ax[0].set_title("Loss Metrics")
ax[1].set_title("Accuracy Metrics")
ax[1].plot(history.history["accuracy"], label="accuracy")
ax[1].plot(history.history["val_accuracy"], label="val_accuracy")
ax[0].legend()
ax[1].legend()
plt.show()
# # Save Model
model.load_weights("ckpt")
model.save("models", save_format="tf")
for idx, i in enumerate(model.layers):
print("Encoder layers:" if idx == 0 else "Decoder layers: ")
for j in i.layers:
print(j)
print("---------------------")
# # Create Inference Model
class ChatBot(tf.keras.models.Model):
def __init__(self, base_encoder, base_decoder, *args, **kwargs):
super().__init__(*args, **kwargs)
self.encoder, self.decoder = self.build_inference_model(
base_encoder, base_decoder
)
def build_inference_model(self, base_encoder, base_decoder):
encoder_inputs = tf.keras.Input(shape=(None,))
x = base_encoder.layers[0](encoder_inputs)
x = base_encoder.layers[1](x)
x, encoder_state_h, encoder_state_c = base_encoder.layers[2](x)
encoder = tf.keras.models.Model(
inputs=encoder_inputs,
outputs=[encoder_state_h, encoder_state_c],
name="chatbot_encoder",
)
decoder_input_state_h = tf.keras.Input(shape=(lstm_cells,))
decoder_input_state_c = tf.keras.Input(shape=(lstm_cells,))
decoder_inputs = tf.keras.Input(shape=(None,))
x = base_decoder.layers[0](decoder_inputs)
        x = base_decoder.layers[1](x)  # use the decoder's own normalization layer
x, decoder_state_h, decoder_state_c = base_decoder.layers[2](
x, initial_state=[decoder_input_state_h, decoder_input_state_c]
)
decoder_outputs = base_decoder.layers[-1](x)
decoder = tf.keras.models.Model(
inputs=[decoder_inputs, [decoder_input_state_h, decoder_input_state_c]],
outputs=[decoder_outputs, [decoder_state_h, decoder_state_c]],
name="chatbot_decoder",
)
return encoder, decoder
def summary(self):
self.encoder.summary()
self.decoder.summary()
def softmax(self, z):
return np.exp(z) / sum(np.exp(z))
def sample(self, conditional_probability, temperature=0.5):
conditional_probability = np.asarray(conditional_probability).astype("float64")
conditional_probability = np.log(conditional_probability) / temperature
reweighted_conditional_probability = self.softmax(conditional_probability)
probas = np.random.multinomial(1, reweighted_conditional_probability, 1)
return np.argmax(probas)
def preprocess(self, text):
text = clean_text(text)
seq = np.zeros((1, max_sequence_length), dtype=np.int32)
for i, word in enumerate(text.split()):
seq[:, i] = sequences2ids(word).numpy()[0]
return seq
def postprocess(self, text):
text = re.sub(" - ", "-", text.lower())
text = re.sub(" [.] ", ". ", text)
text = re.sub(" [1] ", "1", text)
text = re.sub(" [2] ", "2", text)
text = re.sub(" [3] ", "3", text)
text = re.sub(" [4] ", "4", text)
text = re.sub(" [5] ", "5", text)
text = re.sub(" [6] ", "6", text)
text = re.sub(" [7] ", "7", text)
text = re.sub(" [8] ", "8", text)
text = re.sub(" [9] ", "9", text)
text = re.sub(" [0] ", "0", text)
text = re.sub(" [,] ", ", ", text)
text = re.sub(" [?] ", "? ", text)
text = re.sub(" [!] ", "! ", text)
text = re.sub(" [$] ", "$ ", text)
text = re.sub(" [&] ", "& ", text)
text = re.sub(" [/] ", "/ ", text)
text = re.sub(" [:] ", ": ", text)
text = re.sub(" [;] ", "; ", text)
text = re.sub(" [*] ", "* ", text)
text = re.sub(" ['] ", "'", text)
text = re.sub(' ["] ', '"', text)
return text
def call(self, text, config=None):
input_seq = self.preprocess(text)
states = self.encoder(input_seq, training=False)
target_seq = np.zeros((1, 1))
target_seq[:, :] = sequences2ids(["<start>"]).numpy()[0][0]
stop_condition = False
decoded = []
while not stop_condition:
decoder_outputs, new_states = self.decoder(
[target_seq, states], training=False
)
# index=tf.argmax(decoder_outputs[:,-1,:],axis=-1).numpy().item()
index = self.sample(decoder_outputs[0, 0, :]).item()
word = ids2sequences([index])
if word == "<end> " or len(decoded) >= max_sequence_length:
stop_condition = True
else:
decoded.append(index)
target_seq = np.zeros((1, 1))
target_seq[:, :] = index
states = new_states
return self.postprocess(ids2sequences(decoded))
chatbot = ChatBot(model.encoder, model.decoder, name="chatbot")
chatbot.summary()
tf.keras.utils.plot_model(
chatbot.encoder,
to_file="encoder.png",
show_shapes=True,
show_layer_activations=True,
)
tf.keras.utils.plot_model(
chatbot.decoder,
to_file="decoder.png",
show_shapes=True,
show_layer_activations=True,
)
# # Time to Chat
def print_conversation(texts):
for text in texts:
print(f"You: {text}")
print(f"Bot: {chatbot(text)}")
print("========================")
print_conversation(
[
"hi",
"do yo know me?",
"what is your name?",
"you are bot?",
"hi, how are you doing?",
"i'm pretty good. thanks for asking.",
"Don't ever be in a hurry",
"""I'm gonna put some dirt in your eye """,
"""You're trash """,
"""I've read all your research on nano-technology """,
"""You want forgiveness? Get religion""",
"""While you're using the bathroom, i'll order some food.""",
"""Wow! that's terrible.""",
"""We'll be here forever.""",
"""I need something that's reliable.""",
"""A speeding car ran a red light, killing the girl.""",
"""Tomorrow we'll have rice and fish for lunch.""",
"""I like this restaurant because they give you free bread.""",
]
)
|
# # ⚡️ Fast & Memory-Efficient Market Basket Analysis (Polars x Efficient Apriori)
# In this notebook, we efficiently perform market basket analysis on large transactional datasets with `367,049 transactions` **using Polars and efficient-apriori**, instead of traditional libraries like mlxtend and pandas. We preprocess data with Polars, apply the Apriori algorithm for frequent itemsets and association rules, and analyze them using lift, confidence, conviction, and support metrics. Our approach enables the optimization of marketing strategies, product placements, and cross-selling opportunities while **handling larger datasets with improved performance and reduced memory consumption.**
# We use Polars instead of pandas
import polars as pl
# Basket Libraries
from efficient_apriori import apriori, generate_rules_apriori
# Runtime
import time
import os.path
start_time = time.time()
# Input data files are available in the read-only "../input/" directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ## Pre-processing
# To convert the orders dataframe into a list of tuples, I use a list comprehension that iterates over the rows in the data frame and extracts the second item in each row (which is a tuple). I assign this list to a variable called `transactions`.
df_grouped = (
pl.read_csv("../input/51-cluster/367049(labe).csv")[["user_id", "matched_words"]]
.unique(subset=["user_id", "matched_words"], maintain_order=True)
.groupby("user_id")
.agg([pl.col("matched_words")])
)
transactions = [tuple(row[1]) for row in df_grouped.iter_rows()]
transactions[:5]
# ## Apply Apriori Algorithm
# This code applies the Apriori algorithm to the transactions data using the apriori() function from the efficient_apriori library. The `min_support` parameter is set to 7 divided by the total number of transactions (a common rule of thumb), so an itemset must appear in at least 7 baskets to be considered frequent. The `min_confidence` parameter is set to 0, which means rules are not filtered by confidence at this stage.
# selecting a minimum support threshold
min_support = 7 / len(transactions)
min_confidence = 0
itemsets, rules = apriori(
transactions, min_support=min_support, min_confidence=min_confidence
)
# The apriori() function returns two outputs: `itemsets` and `rules`. itemsets is a list of all the frequent itemsets found in the data, while `rules` is a list of all the strong association rules found in the data, according to the specified min_support and min_confidence thresholds.
# Get Example
for i in range(5):
print(rules[i])
# Each rule is printed in set notation, with the antecedent and consequent items enclosed in curly braces. The confidence, support, lift, and conviction of each rule are shown in parentheses, separated by commas. In the next step we extract these attributes into a DataFrame.
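# Illustrative only: the standard relationships between the metrics printed above,
# written out for a hypothetical rule {A, B} -> {C} with made-up support values.
support_lhs = 0.04    # assumed P(A and B)
support_rhs = 0.10    # assumed P(C)
support_rule = 0.02   # assumed P(A and B and C)
confidence = support_rule / support_lhs              # 0.5
lift = confidence / support_rhs                       # 5.0
conviction = (1 - support_rhs) / (1 - confidence)     # 1.8
print(confidence, lift, conviction)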
# ## Get the Attributes
# This code takes a list of association rules as input and filters the `rules` to select only those with two items in the antecedent (or left-hand side) and one item in the consequent (or right-hand side). The filtered rules are then sorted by `lift`, which is a measure of the degree to which the presence of one item in the antecedent is associated with the presence of another item in the consequent, relative to the expected frequency of the consequent.
# a list of objects with attributes lhs, rhs, lift, confidence, conviction, and support
filtered_rules = [rule for rule in rules if len(rule.lhs) == 2 and len(rule.rhs) == 1]
# Sort the filtered rules by lift
sorted_rules = sorted(filtered_rules, key=lambda rule: rule.lift)
# Create a list of dictionaries for the DataFrame
rules_data = [
{
"lhs": list(rule.lhs), # Convert lhs to a list
"rhs": list(rule.rhs), # Convert rhs to a list
"lift": rule.lift,
"confidence": rule.confidence,
"conviction": rule.conviction,
"support": rule.support,
}
for rule in sorted_rules
]
# Create a DataFrame from the list of dictionaries
rules_df = pl.DataFrame(rules_data)
# Sort the DataFrame by lift in descending order and display the top 20 rows
rules_df.sort("lift", descending=True).head(20)
end_time = time.time()
elapsed_time = end_time - start_time
print(f"Elapsed time: {elapsed_time:.2f} seconds")
|
import numpy as np
import matplotlib.pyplot as plt
import time
from matplotlib.animation import FuncAnimation
from IPython import display
route_frames = []
distance_frame = []
# Create a matrix of cities, with each row being a location in 2-space (function works in n-dimensions).
cities = np.random.RandomState(59).rand(50, 2)
# cities
# Calculate the euclidian distance in n-space of the route r traversing cities c, ending at the path start.
path_distance = lambda r, c: np.sum(
[np.linalg.norm(c[r[p]] - c[r[p - 1]]) for p in range(len(r))]
)
# Reverse the order of all elements from element i to element k in array r.
two_opt_swap = lambda r, i, k: np.concatenate(
(r[0:i], r[k : -len(r) + i - 1 : -1], r[k + 1 : len(r)])
)
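# Quick sanity check (illustrative only): reversing the segment between positions
# i=2 and k=5 of an 8-city route with two_opt_swap.
example_route = np.arange(8)
print(two_opt_swap(example_route, 2, 5))  # expected: [0 1 5 4 3 2 6 7]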
def two_opt(
cities, improvement_threshold
): # 2-opt Algorithm adapted from https://en.wikipedia.org/wiki/2-opt
route = np.arange(
cities.shape[0]
) # Make an array of row numbers corresponding to cities.
improvement_factor = 1 # Initialize the improvement factor.
best_distance = path_distance(
route, cities
) # Calculate the distance of the initial path.
    # Initial random route
plot(route, cities, best_distance)
while (
improvement_factor > improvement_threshold
): # If the route is still improving, keep going!
distance_to_beat = (
best_distance # Record the distance at the beginning of the loop.
)
for swap_first in range(
1, len(route) - 2
): # From each city except the first and last,
for swap_last in range(
swap_first + 1, len(route)
): # to each of the cities following,
new_route = two_opt_swap(
route, swap_first, swap_last
) # try reversing the order of these cities
new_distance = path_distance(
new_route, cities
) # and check the total distance with this modification.
# plot(route,cities)
# Save data for animation
route_frames.append(new_route)
distance_frame.append(new_distance)
if (
new_distance < best_distance
): # If the path distance is an improvement,
route = new_route # make this the accepted best route
best_distance = new_distance # and update the distance corresponding to this route.
# Save data for animation
# route_frames.append(route)
# plot(route,cities)
improvement_factor = (
1 - best_distance / distance_to_beat
) # Calculate how much the route has improved.
print(improvement_factor)
plot(route, cities, new_distance)
return route # When the route is no longer improving substantially, stop searching and return the route.
def plot(route, cities, new_distance):
# Reorder the cities matrix by route order in a new matrix for plotting.
new_cities_order = np.concatenate(
(np.array([cities[route[i]] for i in range(len(route))]), np.array([cities[0]]))
)
plt.title("2-opt Algorithm Distance =%1.3f" % new_distance, fontsize=18)
# Plot the cities.
plt.scatter(cities[:, 0], cities[:, 1])
# Plot the path.
plt.plot(new_cities_order[:, 0], new_cities_order[:, 1])
plt.show()
# Print the route as row numbers and the total distance travelled by the path.
print("Route: " + str(route) + "\n\nDistance: " + str(path_distance(route, cities)))
# Find a good route with 2-opt ("route" gives the order in which to travel to each city by row number.)
t0 = time.time()
route = two_opt(cities, 0.001)
print("\n", (time.time() - t0) / 60, "Minutes process time")
# fig, ax = plt.subplots()
fig = plt.figure()
# lines = ax.plot([], color='red', marker='o', markersize=4)
# line = lines[0]
def animate(frame):
# update plot
plt.cla()
# Reorder the cities matrix by route order in a new matrix for plotting.
new_cities_order = np.concatenate(
(
np.array(
[
cities[route_frames[frame][i]]
for i in range(len(route_frames[frame]))
]
),
np.array([cities[0]]),
)
)
plt.title("2-opt Algorithm Distance =%1.3f" % distance_frame[frame], fontsize=18)
# Plot the cities.
plt.scatter(cities[:, 0], cities[:, 1])
# Plot the path.
plt.plot(new_cities_order[:, 0], new_cities_order[:, 1])
anim = FuncAnimation(fig, animate, frames=len(route_frames), interval=12)
video = anim.to_html5_video()
html = display.HTML(video)
display.display(html)
plt.close()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# importing the required libraries
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import f1_score
# importing algorithms
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
confusion_matrix,
accuracy_score,
ConfusionMatrixDisplay,
classification_report,
)
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_curve, roc_auc_score
sc = StandardScaler()
# ## Reading the data from csv file
data = pd.read_csv(
"/kaggle/input/ip-network-traffic-flows-labeled-with-87-apps/Dataset-Unicauca-Version2-87Atts.csv"
)
data.head()
print("Number of Rows: {}".format(data.shape[0]))
print("Number of Columns: {}".format(data.shape[1]))
# ## Under Sampling
# Graph of Protocol Name vs Frequency
freq_protocol = data["ProtocolName"].value_counts()
# sns.histplot(freq_protocol.values())
application_name = []
frequency_count = []
for key, value in freq_protocol.items():
application_name.append(key)
frequency_count.append(value)
print("Number of Unique Application Names: ", len(freq_protocol))
# graph of top 5 application names
top_values = 5
plt.bar(application_name[:top_values], frequency_count[:top_values])
plt.xlabel("Application Name")
plt.ylabel("Frequency")
# filtering the classes which have at least 10000 rows (occurrences)
requiredProtocolName = []
for key, value in freq_protocol.items():
if value >= 10000:
requiredProtocolName.append(key)
print(requiredProtocolName)
# taking the random 10000 data from the requiredProtocolName
# and forming the dataset
listofDataFrames = []
for protocol in requiredProtocolName:
listofDataFrames.append(
pd.DataFrame(data[data["ProtocolName"] == protocol].sample(n=10000))
)
sampledData = pd.concat(listofDataFrames)
sampledData.shape
# taking random rows and shuffling the dataframe
data = sampledData.sample(frac=1, random_state=1).reset_index()
# remove the rows that contains NULL values
data.dropna(inplace=True)
data.dropna(axis="columns")
data.reset_index(drop=True, inplace=True)
# remove columns which contains zeroes in the data
data = data.loc[:, (data != 0).any(axis=0)]
print("Shape after removing rows with NULL Values")
print("Number of Rows: {}".format(data.shape[0]))
print("Number of Columns: {}".format(data.shape[1]))
# converting the protocol name (target column) to required format (int)
# using the LabelEncoder class from the sklearn.preprocessing library
encoder = LabelEncoder().fit(data["ProtocolName"])
data["ProtocolName"] = encoder.transform(data["ProtocolName"])
values = encoder.inverse_transform(data["ProtocolName"])
target_column = data["ProtocolName"]
# mapping each encoded value back to its original protocol name
encoded_target_column = {}
for i in range(len(data["ProtocolName"])):
encoded_target_column[data["ProtocolName"][i]] = values[i]
print(encoded_target_column)
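# Note (optional sketch): the same code-to-name mapping can be built directly
# from the fitted LabelEncoder, since `classes_` is indexed by the integer codes.
encoded_target_column_alt = {code: name for code, name in enumerate(encoder.classes_)}
print(encoded_target_column_alt)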
# get all the column heads
data.columns
# removing extra columns that are not useful for finding correlation
# axis = 1 because we need to drop the columns
# by default axis = 0 (drop the rows)
dataset = data.drop(
[
"Flow.ID",
"Source.IP",
"Label",
"Timestamp",
"Destination.IP",
"Source.Port",
"Destination.Port",
"Protocol",
],
axis=1,
)
x_dataset = dataset.drop(["ProtocolName"], axis=1)
y_dataset = dataset["ProtocolName"]
# normal dataset
X_train, X_test, y_train, y_test = train_test_split(x_dataset, y_dataset, test_size=0.2)
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# ## Correlation Matrix
# finding the correlation matrix
correlation_matrix = dataset.corr()
correlation_matrix.head()
# plotting the heatmap
plt.figure(figsize=(30, 30))
sns.heatmap(correlation_matrix, cmap="viridis")
plt.plot()
# ## Feature Selection via Correlation Matrix
sorted_corr_matrix_protocolName = correlation_matrix["ProtocolName"].sort_values(
ascending=False
)
allKeys = list(sorted_corr_matrix_protocolName.keys())
# removing the target column
allKeys.remove("ProtocolName")
feature_map = {}
# removing the features which have an absolute correlation below 0.01
# and grouping the columns which have the same correlation (rounded to 2 decimal places)
for colName in allKeys:
correlation = round(sorted_corr_matrix_protocolName[colName], 2)
if abs(correlation) >= 0.01:
if correlation in feature_map:
feature_map[correlation].append(colName)
else:
feature_map[correlation] = [colName]
print("Columns with absolute correlation greater than 0.01 with ProtocolName: \n")
print(feature_map)
# choosing one random feature from each group of equally correlated columns
final_features = []
import random
# random_columns = []
for correlation, column_list in feature_map.items():
final_features.append(random.choice(column_list))
print("Number of Features to be Taken: ", len(final_features))
print("Randomly selected columns for each correlation value: ")
print(final_features)
pos_value_columns = final_features
# final data which will be used for prediction and splitting
data_for_prediction_featureSelection = data[pos_value_columns]
target_column = data["ProtocolName"]
# splitting data for feature selection via correlation matrix
feature_train, feature_test, target_train, target_test = train_test_split(
data_for_prediction_featureSelection, target_column, test_size=0.2
)
sc = StandardScaler()
feature_train = sc.fit_transform(feature_train)
feature_test = sc.transform(feature_test)
data_for_prediction_featureSelection
# splitting the original data and standardizing it
(
feature_train_std,
feature_test_std,
target_train_std,
target_test_std,
) = train_test_split(x_dataset, y_dataset, test_size=0.2)
sc = StandardScaler()
feature_train_std = sc.fit_transform(feature_train_std)
feature_test_std = sc.transform(feature_test_std)
final_result = {
"Random Forest": {
"Feature Selection": 0,
"PCA": 0,
"K-Cross Folding": 0,
"Original": 0,
},
"Logistic Regression": {
"Feature Selection": 0,
"PCA": 0,
"K-Cross Folding": 0,
"Original": 0,
},
"Decision Tree": {
"Feature Selection": 0,
"PCA": 0,
"K-Cross Folding": 0,
"Original": 0,
},
}
f1Scores = {
"Random Forest": {
"Feature Selection": 0,
"PCA": 0,
"K-Cross Folding": 0,
"Original": 0,
},
"Logistic Regression": {
"Feature Selection": 0,
"PCA": 0,
"K-Cross Folding": 0,
"Original": 0,
},
"Decision Tree": {
"Feature Selection": 0,
"PCA": 0,
"K-Cross Folding": 0,
"Original": 0,
},
}
numberOfFeatures = {
"Random Forest": {
"Feature Selection": 0,
"PCA": 0,
"K-Cross Folding": 0,
"Original": 0,
},
"Logistic Regression": {
"Feature Selection": 0,
"PCA": 0,
"K-Cross Folding": 0,
"Original": 0,
},
"Decision Tree": {
"Feature Selection": 0,
"PCA": 0,
"K-Cross Folding": 0,
"Original": 0,
},
}
# ## Principal Component Analysis
pca = PCA(n_components=13)
feature_train_pca = pca.fit_transform(feature_train_std)
feature_test_pca = pca.transform(feature_test_std)
print(feature_train_pca.shape, feature_test_pca.shape)
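# Optional sanity check (a sketch): how much of the variance do the 13 components
# retain? `explained_variance_ratio_` is available on any fitted sklearn PCA.
print(
    "Cumulative explained variance of 13 components:",
    pca.explained_variance_ratio_.cumsum()[-1],
)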
# ## Implementing ML Models
# ### Random Forest Classifier
def random_forest_classifier(feature_train, target_train):
randomForest = RandomForestClassifier(n_estimators=200)
randomForest.fit(feature_train, target_train)
return randomForest
# #### Original Dataset
# random forest on original dataset
randomForest = random_forest_classifier(X_train, y_train)
predictions = randomForest.predict(X_test)
print(
"Accuracy with Random Forest on Original Dataset",
randomForest.score(X_test, y_test),
)
final_result["Random Forest"]["Original"] = randomForest.score(X_test, y_test)
f1Scores["Random Forest"]["Original"] = f1_score(y_test, predictions, average="micro")
numberOfFeatures["Random Forest"]["Original"] = X_train.shape[1]
# #### Feature Selection
# random forest on feature selection via correlation matrix data
randomForest = random_forest_classifier(feature_train, target_train)
predictions = randomForest.predict(feature_test)
print(
"Accuracy with Random Forest on Feature Selection via Correlaion Matrix",
randomForest.score(feature_test, target_test),
)
final_result["Random Forest"]["Feature Selection"] = randomForest.score(
feature_test, target_test
)
f1Scores["Random Forest"]["Feature Selection"] = f1_score(
y_test, predictions, average="micro"
)
numberOfFeatures["Random Forest"]["Feature Selection"] = feature_train.shape[1]
# #### PCA
# random forest on PCA
randomForest = random_forest_classifier(feature_train_pca, target_train_std)
predictions = randomForest.predict(feature_test_pca)
print(
"Accuracy with Random Forest on PCA",
randomForest.score(feature_test_pca, target_test_std),
)
final_result["Random Forest"]["PCA"] = randomForest.score(
feature_test_pca, target_test_std
)
f1Scores["Random Forest"]["PCA"] = f1_score(y_test, predictions, average="micro")
numberOfFeatures["Random Forest"]["PCA"] = feature_train_pca.shape[1]
# #### K Cross Folding
# random forest on k cross folding
rfKcross = RandomForestClassifier(n_estimators=100, random_state=42)
# Initialize k-fold cross-validation
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
# Initialize empty lists to store the accuracy scores for each fold
accuracy_scores = []
f1_scores = []
# Iterate through each fold
for train_index, test_index in kfold.split(x_dataset):
# Split the data into training and testing sets for this fold
X_train, X_test = x_dataset.iloc[train_index], x_dataset.iloc[test_index]
y_train, y_test = y_dataset.iloc[train_index], y_dataset.iloc[test_index]
# Fit the random forest classifier to the training data
rfKcross.fit(X_train, y_train)
# Evaluate the model on the testing data and store the accuracy score
accuracy_scores.append(rfKcross.score(X_test, y_test))
f1_scores.append(f1_score(y_test, rfKcross.predict(X_test), average="micro"))
print(rfKcross.score(X_test, y_test))
# Calculate and print the mean accuracy score across all folds
print("Mean accuracy:", sum(accuracy_scores) / len(accuracy_scores))
final_result["Random Forest"]["K-Cross Folding"] = sum(accuracy_scores) / len(
accuracy_scores
)
f1Scores["Random Forest"]["K-Cross Folding"] = sum(f1_scores) / len(f1_scores)
numberOfFeatures["Random Forest"]["K-Cross Folding"] = x_dataset.shape[1]
# ## Logistic Regression
def logistic_regression(feature_train, target_train):
classifier = LogisticRegression(random_state=0, solver="lbfgs", max_iter=100)
classifier.fit(feature_train, target_train)
return classifier
# #### Original Dataset
# logistic regression on original dataset
logisticRegressionOriginal = logistic_regression(X_train, y_train)
target_pred = logisticRegressionOriginal.predict(X_test)
print(
"Accuracy in Logistic Regression on Original Dataset",
logisticRegressionOriginal.score(X_test, y_test),
)
final_result["Logistic Regression"]["Original"] = logisticRegressionOriginal.score(
X_test, y_test
)
f1Scores["Logistic Regression"]["Original"] = f1_score(
y_test, predictions, average="micro"
)
numberOfFeatures["Logistic Regression"]["Original"] = X_train.shape[1]
# #### Feature Selection
# logistic regression on feature selection via correlation matrix data
logisticRegressionFeatureSelection = logistic_regression(feature_train, target_train)
target_pred = logisticRegressionFeatureSelection.predict(feature_test)
print(
"Accuracy in Logistic Regression on Feature Selection via Correlation Matrix",
logisticRegressionFeatureSelection.score(feature_test, target_test),
)
final_result["Logistic Regression"][
"Feature Selection"
] = logisticRegressionFeatureSelection.score(feature_test, target_test)
f1Scores["Logistic Regression"]["Feature Selection"] = f1_score(
y_test, predictions, average="micro"
)
numberOfFeatures["Logistic Regression"]["Feature Selection"] = feature_train.shape[1]
# #### PCA
# logistic regression on PCA
logisticRegressionPCA = logistic_regression(feature_train_pca, target_train_std)
target_pred = logisticRegressionPCA.predict(feature_test_pca)
print(
"Accuracy in Logistic Regression on PCA",
logisticRegressionPCA.score(feature_test_pca, target_test_std),
)
final_result["Logistic Regression"]["PCA"] = logisticRegressionPCA.score(
feature_test_pca, target_test_std
)
f1Scores["Logistic Regression"]["PCA"] = f1_score(y_test, predictions, average="micro")
numberOfFeatures["Logistic Regression"]["PCA"] = feature_train_pca.shape[1]
# #### K Cross Folding
# logistic regression on k cross folding
kCrossLogisticRegression = LogisticRegression()
kfold = KFold(n_splits=5, shuffle=True)
accuracy_scores = []
f1_scores = []
for train_index, test_index in kfold.split(x_dataset):
# Split the data into training and testing sets for this fold
X_train, X_test = x_dataset.iloc[train_index], x_dataset.iloc[test_index]
y_train, y_test = y_dataset.iloc[train_index], y_dataset.iloc[test_index]
# Fit the logistic regression classifier to the training data
kCrossLogisticRegression.fit(X_train, y_train)
# Evaluate the model on the testing data and store the accuracy score
accuracy_scores.append(kCrossLogisticRegression.score(X_test, y_test))
f1_scores.append(
f1_score(y_test, kCrossLogisticRegression.predict(X_test), average="micro")
)
print(kCrossLogisticRegression.score(X_test, y_test))
# Calculate and print the mean accuracy score across all folds
print("Mean accuracy:", sum(accuracy_scores) / len(accuracy_scores))
final_result["Logistic Regression"]["K-Cross Folding"] = sum(accuracy_scores) / len(
accuracy_scores
)
f1Scores["Logistic Regression"]["K-Cross Folding"] = sum(f1_scores) / len(f1_scores)
numberOfFeatures["Logistic Regression"]["K-Cross Folding"] = x_dataset.shape[1]
# ## Decision Tree
# #### Original Data
# decision tree on the original dataset
decisionTreeNormal = DecisionTreeClassifier(
criterion="entropy", max_depth=3, random_state=42
)
decisionTreeNormal.fit(X_train, y_train)
target_pred = decisionTreeNormal.predict(X_test)
accuracy = decisionTreeNormal.score(X_test, y_test)
print("Accuracy in Decision Tree on Original Dataset:", accuracy)
final_result["Decision Tree"]["Original"] = accuracy
f1Scores["Decision Tree"]["Original"] = f1_score(y_test, predictions, average="micro")
numberOfFeatures["Decision Tree"]["Original"] = X_train.shape[1]
# #### Feature Selection
# decision tree on feature selection
decisionTreeNormal = DecisionTreeClassifier(
criterion="entropy", max_depth=3, random_state=42
)
decisionTreeNormal.fit(feature_train, target_train)
target_pred = decisionTreeNormal.predict(feature_test)
accuracy = decisionTreeNormal.score(feature_test, target_test)
print("Accuracy in Decision Tree on Normal:", accuracy)
final_result["Decision Tree"]["Feature Selection"] = accuracy
f1Scores["Decision Tree"]["Feature Selection"] = f1_score(
y_test, predictions, average="micro"
)
numberOfFeatures["Decision Tree"]["Feature Selection"] = feature_train.shape[1]
# #### PCA
# decision tree on PCA
decisionTreeNormal = DecisionTreeClassifier(
criterion="entropy", max_depth=3, random_state=42
)
decisionTreeNormal.fit(feature_train_pca, target_train_std)
target_pred = decisionTreeNormal.predict(feature_test_pca)
accuracy = decisionTreeNormal.score(feature_test_pca, target_test_std)
print("Accuracy in Decision Tree on PCA:", accuracy)
final_result["Decision Tree"]["PCA"] = accuracy
f1Scores["Decision Tree"]["PCA"] = f1_score(y_test, predictions, average="micro")
numberOfFeatures["Decision Tree"]["PCA"] = feature_train_pca.shape[1]
# #### K Cross folding
# decision tree on K-fold cross-validation
decisionTreeNormal = DecisionTreeClassifier(
criterion="entropy", max_depth=3, random_state=42
)
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
accuracy_scores = []
f1_scores = []
for train_index, test_index in kfold.split(x_dataset):
X_train, X_test = x_dataset.iloc[train_index], x_dataset.iloc[test_index]
y_train, y_test = y_dataset.iloc[train_index], y_dataset.iloc[test_index]
decisionTreeNormal.fit(X_train, y_train)
accuracy_scores.append(decisionTreeNormal.score(X_test, y_test))
f1_scores.append(
f1_score(y_test, decisionTreeNormal.predict(X_test), average="micro")
)
print(decisionTreeNormal.score(X_test, y_test))
# Calculate and print the mean accuracy score across all folds
print("Mean accuracy:", sum(accuracy_scores) / len(accuracy_scores))
final_result["Decision Tree"]["K-Cross Folding"] = sum(accuracy_scores) / len(
accuracy_scores
)
f1Scores["Decision Tree"]["K-Cross Folding"] = sum(f1_scores) / len(f1_scores)
numberOfFeatures["Decision Tree"]["K-Cross Folding"] = x_dataset.shape[1]
accuracy_df = pd.DataFrame(final_result)
print("Accuracy for each model for particular algorithm")
accuracy_df
f1Score_df = pd.DataFrame(f1Scores)
print("F1 Scores for each model for particular algorithm")
f1Score_df
features_df = pd.DataFrame(numberOfFeatures)
print("Number of Features for each model and algorithm")
features_df
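# Optional visual comparison (a sketch using the accuracy_df built above):
# one group of bars per preprocessing strategy, one bar per algorithm.
accuracy_df.plot(kind="bar", figsize=(10, 5))
plt.ylabel("Accuracy")
plt.title("Accuracy per algorithm and preprocessing strategy")
plt.show()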
|
# # INTRODUCTION TO STATISTICS PART 1
# In this notebook, we will examine the basic concepts of statistics using the heart disease data set and apply them in Python.
# 
# Content:
#
# 1. [Variable and Variable Types](#1)
# * [1.1 According to Their Structure](#2)
# * [1.2 According to their Property](#3)
# * [1.3 According to Role in Scientific Researches](#4)
#
# 1. [The concept of zero in measurement](#5)
# * [2.1 Absolute Zero (Natural)](#6)
# * [2.2 Relative Zero](#7)
# 1. [Levels of Measurement:](#8)
# * [3.1 Interval](#9)
# * [3.2 Ratio](#10)
# * [3.3 Nominal](#11)
# * [3.4 Ordinal](#12)
# 1. [Measures of Central Tendency](#13)
# * [4.1 Mean](#14)
# * [4.2 Median](#15)
# * [4.3 Mode](#16)
# 1. [Measures of Central Dispersion](#17)
# * [5.1 Range](#18)
# * [5.2 Variance](#19)
#     * [5.3 Standard Deviation](#20)
# * [5.4 Skewness](#21)
# * [5.5 Kurtosis](#22)
# * [5.6 Quartile](#23)
# 1. [Statistical Thinking Model: Mooney](#24)
# * [6.1 Definition of Data](#25)
# * [6.2 Organizing Data](#26)
# * [6.3 Representation of Data](#27)
# * [6.4 Analyzing and Interpreting Data](#28)
# 1. [Population and Sample](#29)
# 1. [Confidence Interval](#30)
#
# # 1. Variable and Variable Types:
# ### A variable is a quantity that takes different values from unit to unit.
# ## 1.1 According to their structure:
# **1.1.1 Numerical variables:** Mathematically expressed by numbers. *Price, size*
# **1.1.2 Categorical Variables:** Variables that cannot be mathematically expressed with numbers. *Gender* is a categorical variable. Male and female are the classes of this variable.
import pandas as pd
import statistics
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
data = pd.read_csv("../input/heart-disease-uci/heart.csv")
numerical_variables = data[["age", "trestbps", "chol", "thalach", "oldpeak"]]
categorical_variables = data.drop(
["age", "trestbps", "chol", "thalach", "oldpeak"], axis=1
)
print("Numerical: ")
print(numerical_variables.head())
print("Categorical: ")
print(categorical_variables.head())
#
# ## 1.2 According to their property
# **1.2.1 Continuous Variable:** A variable that can take infinitely many values between any two values. *Age, LSAT score, height (for example, infinitely many values can be written between 180 cm and 181 cm).* It cannot be counted in discrete units.
# **1.2.2 Discontinuous (Discrete) Variable:** A variable that can take only a limited number of values between two values; it is expressed without decimals. For example, *number of goals, number of questions solved, blood groups.*
# ## 1.3 According to role in scientific researches:
# **1.3.1 Dependent Variable:** The variable that is affected; the outcome we want to explain. For example, does smoking affect heart health? *Heart health = Dependent Variable*
# **1.3.2 Independent Variable:** The variable whose effect we are curious about; the one that affects or causes. In the previous example, *Smoking = Independent Variable*
# # 2. The concept of zero in measurement:
# It refers to the starting point of measurements.
# **2.1 Absolute Zero (Natural):** It means nothingness. It cannot take a negative value. For example, *if the number of solved questions equals zero, it means you did not solve any questions.*
# **2.2 Relative Zero:** It can take a negative value; it does not mean nothingness. For example, *degrees Celsius*.
# # 3. Levels of Measurement:
# * **3.1 Interval:** In this scale, units are grouped by range value and there are meaningful, standard intervals between measurement values. Differences between numbers are meaningful. For example, *temperature, exam scores.*
#
#
# * **3.2 Ratio:** The ratio scale has all the features of the interval scale, and its starting point is zero (absolute zero). For example, someone who is zero years old actually does not exist, or if the number of students in the class is zero, it means that nobody is in the class.
#
# * **3.3 Nominal:** Indicates whether the objects are similar in properties or not. Mathematical operations cannot be performed between classes, but frequency and mode can be calculated. There is no hierarchy between classes. For example, *gender, marital status.*
#
#
# * **3.4 Ordinal:** Variables can be ordered according to a criterion; there is a hierarchy between them. For example, *military ranks (Captain, Major).*
# # 4. Measures of Central Tendency:
# Measures of central tendency are numbers that indicate the centre of a set of ordered numerical data.
# 1. Mean
# 1. Median
# 1. Mode
# ### 4.1 Mean
# The mean is calculated by adding up all of the values and dividing by the number of values.
# Let's find the mean of numerical variables.
print(numerical_variables.mean(axis=0))
#
# ### 4.2 Median
# The median the "middle" of a set of numbers in ascending or decending order. Let's find the median of numerical variables.
print(numerical_variables.median(axis=0))
# ## Note: If you have a lot of outliers, you should use the median instead of the mean.
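# A tiny illustration of the note above (toy numbers, not from the dataset):
# a single extreme value drags the mean, while the median barely moves.
toy = pd.Series([50, 52, 54, 55, 57])
toy_with_outlier = pd.Series([50, 52, 54, 55, 300])
print("Mean:  ", toy.mean(), "->", toy_with_outlier.mean())
print("Median:", toy.median(), "->", toy_with_outlier.median())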
# ### 4.3 Mode
# The mode is the most frequently occurring number.
# * Mode value is not a reliable measure when the number of population or sample is small.
# * The mode is not affected by outliers
# * It can be calculated in numerical and categorical variables.
print(data.mode(axis=0))
# for all variables
data.describe()
#
# # 5. Measures of Central Dispersion
# Measures of central dispersion show how “spread out” the elements of a data set are from the mean.
# 1. Range
# 1. Variance
# 1. Standard deviation
# 1. Skewness
# 1. Kurtosis
# 1. Quartile
# ### Range:
# The range of a data set is the difference between the largest value and the smallest value. Think of an exam: a large range of exam results means that the exam discriminates well between test takers.
import numpy as np
def minmax(val_list):
    # column-wise minimum, maximum and range
    min_val = val_list.min()
    max_val = val_list.max()
    rangevalue = max_val - min_val
print(
"Maximum value: {0} ".format(max_val)
+ " Minimum value: {0} ".format(min_val)
+ " Range value: {0} ".format(rangevalue)
)
return
minmax(numerical_variables)
#
# ### Variance:
# You can think of the variance as the average squared difference between the elements of a data set and the mean.
print(statistics.variance(data["age"]))
#
# ### Standard deviation:
# The standard deviation is simply the square root of the variance. If the standard deviation is high, we can say that the distribution is heterogeneous.
age_sdev = statistics.stdev(data["age"])
print("The Standard deviation of age: {:.4f}".format(age_sdev))
#
# ### Skewness:
# Skewness means that the distribution of a variable is not symmetrical. If the coefficient of skewness is 0, the distribution is symmetric (as in a normal distribution). You can understand it better with the picture below.
# 
# ### Let's look at the coefficient of skewness
data.skew(axis=0, skipna=True)
# ### And graph
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
import seaborn as sns
import matplotlib.pyplot as plt
f, axes = plt.subplots(2, 2, figsize=(15, 10), sharex=False)
sns.distplot(data.age.values, color="skyblue", ax=axes[0, 0])
sns.distplot(data.iloc[:, 4], color="olive", ax=axes[0, 1])
sns.distplot(data.iloc[:, 7], color="gold", ax=axes[1, 0])
sns.distplot(data.iloc[:, 3], color="teal", ax=axes[1, 1])
# thanks to MMelnicki
# https://stackoverflow.com/a/54775278
#
# ### Kurtosis:
# Kurtosis shows how sharply peaked (or heavy-tailed) a distribution is. If the coefficient of (excess) kurtosis is 0, the distribution matches the normal distribution.
data.kurtosis(axis=0, skipna=True)
#
# ### Quartile:
# Quartiles measure the spread of values above and below the median by dividing the distribution into four groups.
# Three cut points, **the lower quartile, the median, and the upper quartile**, split the data set into four equal groups.
# 
numerical_variables.quantile([0.25, 0.5, 0.75])
# ### We can find outliers using quartiles.
def outlier_treatment(column):
    # IQR rule: values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] count as outliers
    Q1, Q3 = np.percentile(column, [25, 75])
    IQR = Q3 - Q1  # IQR = Interquartile Range
    lowerrange = Q1 - (1.5 * IQR)  # below this number is an outlier
    upperrange = Q3 + (1.5 * IQR)  # above this number is an outlier
    return lowerrange, upperrange
# example: select the outlier rows of the age column
lowerbound, upperbound = outlier_treatment(numerical_variables.age)
numerical_variables[
    (numerical_variables.age < lowerbound) | (numerical_variables.age > upperbound)
]
print("Outlier Borders for Age Column:")
print(outlier_treatment(numerical_variables.age)) # for age
# ### We can use the box plot to visualize outliers.
numerical_variables.plot(
kind="box",
subplots=True,
layout=(4, 4),
sharex=False,
sharey=False,
figsize=(15, 15),
)
plt.show()
#
# # 6. Statistical Thinking Model: MOONEY
# It is the guide that models the path from data literacy to data analytics.
# ### Stages of the Mooney
# 1. Definition of data
# 1. Organizing data
# 1. Representation of data
# 1. Analyzing and interpreting data
# ## 1. Definition of data
# ### What are the variables measured in the dataset?
data.columns
# ### What are the types of variables ?
data.head()
#
# * Age: Numerical variable
# * Sex: Categorical variable
# * Cp: Categorical Variable
# * Trestbps: Numerical Variable
# * Chol: Numerical variable
# * Fbs: Categorical variable
# * Restecg: Categorical variable
# * Thalach: Numerical variable
# * Exang: categorical variable
# * Oldpeak: Numerical variable
# * Slope: Categorical variable
# * Ca: Categorical variable
# * Thal: Categorical variable
# * Target: Categorical variable
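# A quick, optional check of the classification above (a sketch): columns with
# only a handful of distinct values are the ones treated as categorical here.
print(data.nunique().sort_values())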
# ### On what scale is each of the variables above measured?
# * Age: Ratio
# * Sex: Nominal
# * Cp: Nominal
# * Trestbps: Ratio (if it indicated the severity of pain --> Ordinal)
# * Chol: Ratio
# * Fbs: Nominal
# * Restecg: Nominal
# * Thalach: Ratio
# * Exang: Nominal
# * Oldpeak: Ratio
# * Slope: Nominal
# * Ca: Nominal
# * Thal: Nominal
# * Target: Nominal
# ## 2. Organizing data
# Organizing the data for better understanding.
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 5))
plt.hist(data.age, bins=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
# ### We can say that most of our samples are between 50-60.
data[["sex", "target"]].groupby(["sex"], as_index=False).mean().sort_values(
by="target", ascending=False
)
# **75 percent of those whose gender is female are sick, and 44 percent of those whose gender is male are sick.**
data[["cp", "target"]].groupby(["cp"], as_index=False).mean().sort_values(
by="target", ascending=False
)
# **Those with chest pain type 1 have a higher rate of getting sick**
# ## 3. Representation of Data
# In order to better understand the data, the correct graph selection should be made.
# **An example of wrong chart selection**
fig, ax = plt.subplots()
ax.scatter(data.age.index, data.age)
plt.ylabel("Age")
plt.xlabel("index")
plt.show()
# **Correct representation**
import seaborn as sns
from matplotlib import pyplot
a4_dims = (18, 8)
fig, ax = pyplot.subplots(figsize=a4_dims)
sns.countplot(x="age", hue="target", data=data, linewidth=1, ax=ax)
#
# ## 4. Analyzing and interpreting data
sns.set_style("whitegrid")
a4_dims = (18, 8)
fig, ax = pyplot.subplots(figsize=a4_dims)
sns.countplot(data.sex, hue="target", data=data, linewidth=1, ax=ax)
ax.set(xlabel="1=male - 0=female", ylabel="Count")
print(data.sex.value_counts())
# ### Although the number of women in the dataset is small, the disease is proportionally more common among them.
#
print(data.cp.value_counts())
pd.crosstab(data.cp, data.target).plot(
kind="barh", figsize=(15, 7), color=["#0000ff", "#000000"]
)
plt.title("Chest pain type and target distribution")
plt.xlabel("Frequency")
plt.ylabel("Chest pain type")
plt.show()
# ### The rate of disease is higher in patients with chest pain type 2.
# # 7. Population and Sample
# ### A *population* is the entire group that you want to draw conclusions about.
# ### A *sample* is the specific group that you will collect data from. The size of the sample is always less than the total size of the population.
# 
# ### Let's write an example for the variable age of the data set.
np.random.seed(10)
sample = np.random.choice(
a=data.age, size=100
) # we choose 100 random value from age column
print("Sample mean : {0}".format(sample.mean())) # sample mean
print("Population Mean : {:.2f}".format(data.age.mean())) # population mean
# ### The sample we selected represents the population well: the sample mean is close to the population mean.
# # 8. Confidence interval:
# A confidence interval is a range of values that is likely to contain the true value of the population parameter. Let's compute one for cholesterol.
#
import statsmodels.stats.api as sms
sms.DescrStatsW(data.chol).tconfint_mean()
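# Manual sketch of the same 95% interval, mean +/- t * (s / sqrt(n)), using
# scipy's t distribution; it should match the statsmodels result above.
from scipy import stats
n = len(data.chol)
t_crit = stats.t.ppf(0.975, df=n - 1)
margin = t_crit * data.chol.std(ddof=1) / np.sqrt(n)
print(data.chol.mean() - margin, data.chol.mean() + margin)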
|
# # Intro
# Welcome to the [Bristol-Myers Squibb – Molecular Translation](https://www.kaggle.com/c/bms-molecular-translation/overview) Competition:
# 
# Please upvote the notebook if it helps you. Feel free to leave a comment on the notebook. Thank you.
# # Libraries
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# # Path
path = "/kaggle/input/bms-molecular-translation/"
os.listdir(path)
# # Load Data
train_data = pd.read_csv(path + "train_labels.csv")
samp_subm = pd.read_csv(path + "sample_submission.csv")
# # Export
output = samp_subm
output.to_csv("submission.csv", index=False)
output
|
# 20 "erfolgreichste", vielpraktizierende Rechtsvertreter für strafrechtliche Beschwerden am Bundesgericht seit Inkrafttreten der StPO (2011)
# ... wobei die Begrifflichkeit "Erfolg" in diesem Kontext selbstverständlich relativ ist und als am "erfolgreichsten" natürlich auch diejenige Anwältin oder derjenige Anwalt angesehen werden kann, der es gar nicht nötig hat, seine Fälle vor Bundesgericht zu bringen.
# 6B und 1B-Fälle in den Jahren 2011 bis Ende Juni 2020,
# (mit mind. durchschn. einer Beschwerde pro Jahr, d.h. mind. 9 Beschwerden total),
# Die Datengrundlage ist nicht fehlerfrei,
# Stand: 28.02.2021.
import pandas as pd
pd.set_option("precision", 3)
B2011 = pd.read_csv("/kaggle/input/bger6b2018/1B_2011.csv")
B2012 = pd.read_csv("/kaggle/input/bger6b2018/1B_2012.csv")
B2013 = pd.read_csv("/kaggle/input/bger6b2018/1B_2013.csv")
B2014 = pd.read_csv("/kaggle/input/bger6b2018/1B_2014.csv")
B2015 = pd.read_csv("/kaggle/input/bger6b2018/1B_2015.csv")
B2016 = pd.read_csv("/kaggle/input/bger6b2018/1B_2016.csv")
B2017 = pd.read_csv("/kaggle/input/bger6b2018/1B_2017.csv")
B2018 = pd.read_csv("/kaggle/input/bger6b2018/1B_2018.csv")
B2019 = pd.read_csv("/kaggle/input/bger6b2018/1B_2019.csv")
B2020 = pd.read_csv("/kaggle/input/bger6b2018/1B_2020.csv")
B2011b = pd.read_csv("/kaggle/input/bger6b2018/6B_2011.csv")
B2012b = pd.read_csv("/kaggle/input/bger6b2018/6B_2012.csv")
B2013b = pd.read_csv("/kaggle/input/bger6b2018/6B_2013.csv")
B2014b = pd.read_csv("/kaggle/input/bger6b2018/6B_2014.csv")
B2015b = pd.read_csv("/kaggle/input/bger6b2018/6B_2015.csv")
B2016b = pd.read_csv("/kaggle/input/bger6b2018/6B_2016.csv")
B2017b = pd.read_csv("/kaggle/input/bger6b2018/6B_2017.csv")
B2018b = pd.read_csv("/kaggle/input/bger6b2018/6B_2018.csv")
B2019b = pd.read_csv("/kaggle/input/bger6b2018/6B_2019.csv")
B2020b = pd.read_csv("/kaggle/input/bger6b2018/6B_2020.csv")
df = pd.concat(
[
B2011,
B2012,
B2013,
B2014,
B2015,
B2016,
B2017,
B2018,
B2019,
B2020,
B2011b,
B2012b,
B2013b,
B2014b,
B2015b,
B2016b,
B2017b,
B2018b,
B2019b,
B2020b,
]
)
# Data cleaning follows:
# manual correction of entries flagged "# Fehler bei get_vorinstanz"
df.loc[df.Verfahrensnummer == "1B_17/2011", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_24/2011", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "1B_58/2011", "Vorinstanz"
] = "Obergericht des Kantons Thurgau"
df.loc[
df.Verfahrensnummer == "1B_68/2011", "Vorinstanz"
] = "Obergericht des Kantons Thurgau"
df.loc[df.Verfahrensnummer == "1B_192/2011", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "1B_231/2011", "Vorinstanz"
] = "Cour de justice du canton de Genève"
df.loc[df.Verfahrensnummer == "1B_367/2011", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_393/2011", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "1B_540/2011", "Vorinstanz"
] = "Kantonsgericht des Kantons Luzern"
df.loc[
df.Verfahrensnummer == "1B_13/2012", "Vorinstanz"
] = "ancien Juge d'instruction de la République et canton de Genève"
df.loc[df.Verfahrensnummer == "1B_203/2012", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "1B_323/2012", "Vorinstanz"
] = "Obergericht des Kantons Aargau"
df.loc[df.Verfahrensnummer == "1B_498/2012", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_518/2012", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_616/2012", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[
df.Verfahrensnummer == "1B_631/2012", "Vorinstanz"
] = "Obergericht des Kantons Aargau"
df.loc[
df.Verfahrensnummer == "1B_59/2013", "Vorinstanz"
] = "Cour de justice de la République et canton de Genève"
df.loc[
df.Verfahrensnummer == "1B_109/2013", "Vorinstanz"
] = "Obergericht des Kantons Aargau"
df.loc[df.Verfahrensnummer == "1B_373/2013", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_386/2013", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "1B_15/2014", "Vorinstanz"
] = "Obergericht des Kantons Aargau"
df.loc[
df.Verfahrensnummer == "1B_50/2014", "Vorinstanz"
] = "Obergericht des Kantons Zürich"
df.loc[df.Verfahrensnummer == "1B_229/2014", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "1B_261/2014", "Vorinstanz"
] = "Tribunal des mesures de contrainte du canton de Vaud"
df.loc[
df.Verfahrensnummer == "1B_399/2014", "Vorinstanz"
] = "Obergericht des Kantons Aargau"
df.loc[
df.Verfahrensnummer == "1B_8/2015", "Vorinstanz"
] = "Cour de justice de la République et canton de Genève"
df.loc[
df.Verfahrensnummer == "1B_28/2015", "Vorinstanz"
] = "Tribunal cantonal de l'Etat de Fribourg"
df.loc[
df.Verfahrensnummer == "1B_140/2015", "Vorinstanz"
] = "Tribunal cantonal de l'Etat de Fribourg"
df.loc[
df.Verfahrensnummer == "1B_367/2015", "Vorinstanz"
] = "Obergericht des Kantons Aargau"
df.loc[
df.Verfahrensnummer == "1B_405/2015", "Vorinstanz"
] = "Obergericht des Kantons Aargau"
df.loc[df.Verfahrensnummer == "1B_13/2016", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "1B_54/2016", "Vorinstanz"
] = "Appellationsgericht des Kantons Basel-Stadt"
df.loc[
df.Verfahrensnummer == "1B_94/2016", "Vorinstanz"
] = "Cour de justice de la République et canton de Genève"
df.loc[df.Verfahrensnummer == "1B_102/2016", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_107/2016", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "1B_138/2016", "Vorinstanz"
] = "Cour d'appel pénal de l'Etat de Fribourg"
df.loc[df.Verfahrensnummer == "1B_141/2016", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_162/2016", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_164/2016", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_182/2016", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_287/2016", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_310/2016", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_370/2016", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "1B_465/2016", "Vorinstanz"
] = "Haftgericht des Kantons Solothurn"
df.loc[
df.Verfahrensnummer == "1B_495/2016", "Vorinstanz"
] = " Tribunal cantonal de l'Etat de Fribourg et l'Autorité"
df.loc[
df.Verfahrensnummer == "1B_28/2017", "Vorinstanz"
] = "Cour de justice de la République et canton de Genève"
df.loc[df.Verfahrensnummer == "1B_44/2017", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "1B_45/2017", "Vorinstanz"
] = "Appellationsgericht des Kantons Basel-Stadt"
df.loc[df.Verfahrensnummer == "1B_183/2017", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[
df.Verfahrensnummer == "1B_197/2017", "Vorinstanz"
] = "Obergericht des Kantons Zürich"
df.loc[
df.Verfahrensnummer == "1B_343/2017", "Vorinstanz"
] = "Obergericht des Kantons Zürich"
df.loc[
df.Verfahrensnummer == "1B_458/2017", "Vorinstanz"
] = "Tribunal des mesures de contrainte de la République et canton de Genève"
df.loc[df.Verfahrensnummer == "1B_89/2018", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_91/2018", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_229/2018", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_230/2018", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_231/2018", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_232/2018", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[
df.Verfahrensnummer == "1B_286/2018", "Vorinstanz"
] = "Cour des plaintes du Tribunal pénal fédéral"
df.loc[df.Verfahrensnummer == "1B_288/2018", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_290/2018", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_292/2018", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_452/2018", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_458/2018", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_135/2019", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_159/2019", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "1B_169/2019", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_381/2019", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "1B_515/2019", "Vorinstanz"
] = "Tribunal cantonal de la République et canton du Jura"
df.loc[df.Verfahrensnummer == "1B_77/2020", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_95/2020", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_165/2020", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_177/2020", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_189/2020", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_584/2020", "Vorinstanz"] = "Kantonsgericht Luzern"
df.loc[df.Verfahrensnummer == "1B_609/2020", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "1B_611/2020", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "6B_110/2011", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "6B_231/2011", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "6B_559/2011", "Vorinstanz"
] = "Tribunal cantonal du canton du Valais"
df.loc[df.Verfahrensnummer == "6B_119/2012", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "6B_74/2013", "Vorinstanz"
] = "Corte dei reclami penali del Tribunale d'appello del Cantone Ticino"
df.loc[df.Verfahrensnummer == "6B_103/2013", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "6B_376/2013", "Vorinstanz"
] = "Tribunal du IIe arrondissement pour le district de Sion"
df.loc[df.Verfahrensnummer == "6B_388/2013", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "6B_787/2013", "Vorinstanz"
] = "Tribunal cantonal de l'Etat de Fribourg"
df.loc[df.Verfahrensnummer == "6B_821/2013", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "6B_1035/2013", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[df.Verfahrensnummer == "6B_354/2014", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "6B_513/2014", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "6B_307/2015", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[df.Verfahrensnummer == "6B_394/2015", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "6B_462/2015", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "6B_726/2015", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "6B_720/2016", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "6B_818/2016", "Vorinstanz"] = "Bundesstrafgericht"
df.loc[
df.Verfahrensnummer == "6B_845/2016", "Vorinstanz"
] = "Tribunal cantonal du canton du Valais"
df.loc[
df.Verfahrensnummer == "6B_1026/2016", "Vorinstanz"
] = "Obergericht des Kantons Aargau"
df.loc[
df.Verfahrensnummer == "6B_1197/2016", "Vorinstanz"
] = "Obergericht des Kantons Bern"
df.loc[
df.Verfahrensnummer == "6B_133/2017", "Vorinstanz"
] = "Cour de justice de la République et canton de Genève"
df.loc[
df.Verfahrensnummer == "6B_584/2017", "Vorinstanz"
] = "Cour d'appel pénale du Tribunal cantonal du canton de Vaud"
df.loc[df.Verfahrensnummer == "6B_1273/2017", "Vorinstanz"] = "undefiniert"
df.loc[df.Verfahrensnummer == "6B_1355/2017", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "6B_1407/2017", "Vorinstanz"
] = "Corte dei reclami penali del Tribunale d'appello del Cantone Ticino"
df.loc[df.Verfahrensnummer == "6B_251/2018", "Vorinstanz"] = "undefiniert"
df.loc[
df.Verfahrensnummer == "6B_295/2018", "Vorinstanz"
] = "Corte dei reclami penali del Tribunale d'appello del Cantone Ticino"
df.loc[
df.Verfahrensnummer == "6B_316/2018", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_1005/2018", "Vorinstanz"
] = "Wirtschaftsstrafgericht des Kantons Bern"
df.loc[
df.Verfahrensnummer == "6B_1337/2018", "Vorinstanz"
] = "Corte dei reclami penali del Tribunale d'appello del Cantone Ticino"
df.loc[
df.Verfahrensnummer == "6B_136/2019", "Vorinstanz"
] = "Corte dei reclami penali del Tribunale d'appello del Cantone Ticino"
df.loc[
df.Verfahrensnummer == "6B_290/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_320/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_472/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_487/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_540/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_569/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_622/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_625/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_691/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_703/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_812/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_835/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_839/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_911/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_986/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_1035/2019", "Vorinstanz"
] = "Appellationsgericht des Kantons Basel-Stadt"
df.loc[
df.Verfahrensnummer == "6B_1069/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_1077/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_1079/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_1082/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_1132/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_1146/2019", "Vorinstanz"
] = "Tribunal cantonal du canton de Vaud"
df.loc[
df.Verfahrensnummer == "6B_1231/2019", "Vorinstanz"
] = "Corte dei reclami penali del Tribunale d'appello del Cantone Ticino"
df.loc[
df.Verfahrensnummer == "6B_1314/2019", "Vorinstanz"
] = "Obergerichts des Kantons Zürich"
df.loc[
df.Verfahrensnummer == "6B_96/2020", "Vorinstanz"
] = "Corte dei reclami penali del Tribunale d'appello del Cantone Ticino"
df.loc[
df.Verfahrensnummer == "6B_275/2020", "Vorinstanz"
] = "Tribunale d'appello del Cantone Ticino"
# Regex replacements
df = df.replace(r"(?<=Anklagekammer des Kantons St. Gdfn).*", "", regex=True)
df = df.replace(
r"Anklagekammer.*Gdfn", "Anklagekammer des Kantons St. Gdfn", regex=True
)
df = df.replace(r"(?<=Anklagekammer des Kantons Thurgau).*", "", regex=True)
df = df.replace(r"(?<=Anklagekammer des Kantons Waadt).*", "", regex=True)
df = df.replace(r"(?<=Appellationsgericht des Kantons Basel-Stadt).*", "", regex=True)
df = df.replace(
r"(?<=Appellationsgerichtpr).*", "aesidentIn des Kantons Basel-Stadt", regex=True
)
df = df.replace(
r"Cour de cassation pénale", "Tribunal cantonal du canton de Vaud", regex=True
)
df = df.replace(
r"Tribunal cantonal",
"Tribunal d'accusation du Tribunal cantonal du canton de Vaud",
regex=True,
)
# manual correction of odd results
df.loc[df.Verfahrensnummer == "1B_232/2011", "Vorinstanz"] = "Kantonsgericht Schwyz"
df.loc[
df.Verfahrensnummer == "1B_30/2011", "Vorinstanz"
] = "Obergericht des Kantons Zürich"
df.loc[
df.Verfahrensnummer == "1B_46/2011", "Vorinstanz"
] = "Obergericht des Kantons Bern"
df.loc[df.Verfahrensnummer == "6B_406/2017", "Vorinstanz"] = "Kantonsgericht St. Gdfn"
df.loc[
df.Verfahrensnummer == "1B_2/2011", "Vorinstanz"
] = "Obergericht des Kantons Bern"
df.loc[
df.Verfahrensnummer == "6B_377/2016", "Vorinstanz"
] = "Obergericht des Kantons Bern"
df.loc[
df.Verfahrensnummer == "1B_411/2015", "Vorinstanz"
] = "Obergericht des Kantons Aargau" #
# manual correction of erroneous results
df.loc[df.Verfahrensnummer == "1B_67/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_89/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_232/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_273/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_326/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_331/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_387/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_421/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_435/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_450/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_465/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_471/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_520/2011", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_540/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_684/2011", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "1B_685/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_687/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_87/2012", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_186/2012", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_189/2012", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_208/2012", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_290/2012", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_323/2012", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_365/2012", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "1B_460/2012", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "1B_665/2012", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_125/2013", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "1B_129/2013", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "1B_326/2013", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_356/2013", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_397/2013", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "1B_411/2013", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_456/2013", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_30/2014", "Verfahrensergebnis"] = "Abweisung"
df.loc[
df.Verfahrensnummer == "1B_179/2014", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_245/2014", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "1B_251/2014", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_298/2014", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_299/2014", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_365/2014", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "1B_375/2014", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_417/2014", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_419/2014", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_8/2015", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_63/2015", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "1B_168/2015", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_209/2015", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_224/2015", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_250/2015", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "1B_361/2015", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "1B_411/2015", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "1B_415/2015", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "1B_31/2016", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "1B_42/2016", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_81/2016", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_172/2016", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "1B_174/2016", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "1B_176/2016", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_184/2016", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_185/2016", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_187/2016", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_264/2016", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "1B_266/2016", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_417/2016", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_28/2017", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_83/2017", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_129/2017", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_152/2017", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_157/2017", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "1B_191/2017", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_305/2017", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_384/2017", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_131/2018", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_179/2018", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "1B_185/2018", "Verfahrensergebnis"] = "Abweisung"
df.loc[
df.Verfahrensnummer == "1B_207/2018", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_267/2018", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "1B_286/2018", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "1B_288/2018", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "1B_290/2018", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "1B_292/2018", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_382/2018", "Verfahrensergebnis"] = "Abweisung"
df.loc[
df.Verfahrensnummer == "1B_41/2019", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "1B_93/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_139/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_170/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_216/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_280/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_324/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_416/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_479/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_549/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_556/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "1B_567/2019", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "1B_568/2019", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_593/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_617/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_29/2020", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_384/2020", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "1B_397/2020", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "1B_398/2020", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_399/2020", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_406/2020", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_412/2020", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "1B_450/2020", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_611/2020", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_9/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_47/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_99/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_105/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_149/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_181/2011", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "6B_198/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_218/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_224/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_240/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_363/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_590/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_596/2011", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_661/2011", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_700/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_705/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_787/2011", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_2/2012", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_11/2012", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_98/2012", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_143/2012", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_171/2012", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_184/2012", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_191/2012", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_226/2012", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_231/2012", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_508/2012", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_611/2012", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_116/2013", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_125/2013", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_178/2013", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "6B_326/2013", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_376/2013", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_494/2013", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "6B_850/2013", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_985/2013", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_1022/2013", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "6B_1074/2013", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "6B_1152/2013", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_1161/2013", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_1179/2013", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_1187/2013", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1206/2013", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_1212/2013", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_85/2014", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_112/2014", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_135/2014", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_167/2014", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_211/2014", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_212/2014", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_307/2014", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_320/2014", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_354/2014", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_664/2014", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_690/2014", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_719/2014", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_774/2014", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_855/2014", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_879/2014", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "6B_922/2014", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "6B_1240/2014", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_265/2015", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_334/2015", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_356/2015", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "6B_424/2015", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_591/2015", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_839/2015", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_895/2015", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1161/2015", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_1237/2015", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1268/2015", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_10/2016", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_192/2016", "Verfahrensergebnis"] = "Abweisung"
df.loc[
df.Verfahrensnummer == "6B_259/2016", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_267/2016", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_411/2016", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_454/2016", "Verfahrensergebnis"] = "Abweisung"
df.loc[
df.Verfahrensnummer == "6B_492/2016", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_564/2016", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_691/2016", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_709/2016", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_818/2016", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_842/2016", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
    df.Verfahrensnummer == "6B_845/2016", "Verfahrensergebnis"
] = "teilweise Gutheissung"  # notable operative part (Dispositiv)
df.loc[
df.Verfahrensnummer == "6B_922/2016", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1051/2016", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_1135/2016", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1181/2016", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1241/2016", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_1264/2016", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_1271/2016", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_1314/2016", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1421/2016", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_133/2017", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_285/2017", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_423/2017", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_756/2017", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_849/2017", "Verfahrensergebnis"] = "Abweisung"
df.loc[
df.Verfahrensnummer == "6B_971/2017", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_1010/2017", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_1011/2017", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_1054/2017", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1101/2017", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1116/2017", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_1199/2017", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_1234/2017", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_1355/2017", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_1389/2017", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "6B_9/2018", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_18/2018", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_48/2018", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_184/2018", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "6B_308/2018", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_500/2018", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_535/2018", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_591/2018", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "6B_736/2018", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "6B_896/2018", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "6B_1066/2018", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_1309/2018", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_1322/2018", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "6B_17/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_37/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_178/2019", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_262/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_327/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_640/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_680/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_765/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_780/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_924/2019", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "6B_976/2019", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1010/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[df.Verfahrensnummer == "6B_1035/2019", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_1063/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_1094/2019", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1177/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1313/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1314/2019", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1330/2019", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_1368/2019", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1433/2019", "Verfahrensergebnis"] = "Abweisung"
df.loc[df.Verfahrensnummer == "6B_48/2020", "Verfahrensergebnis"] = "Gutheissung"
df.loc[
df.Verfahrensnummer == "6B_69/2020", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[
df.Verfahrensnummer == "6B_155/2020", "Verfahrensergebnis"
] = "Gegenstandslosigkeit"
df.loc[df.Verfahrensnummer == "6B_418/2020", "Verfahrensergebnis"] = "Nichteintreten"
df.loc[
df.Verfahrensnummer == "6B_587/2020", "Verfahrensergebnis"
] = "teilweise Gutheissung"
df.loc[df.Verfahrensnummer == "6B_1148/2020", "Verfahrensergebnis"] = "Nichteintreten"
# corrections (made in advance) for rulings not yet contained in the dataset
df.loc[df.Verfahrensnummer == "1B_438/2020", "Verfahrensergebnis"] = "Gutheissung"
df.loc[df.Verfahrensnummer == "1B_611/2019", "Verfahrensergebnis"] = "Abweisung"
df.loc[
df.Verfahrensnummer == "1B_643/2020", "Verfahrensergebnis"
] = "teilweise Gutheissung"
# manual correction of case outcomes noticed to have been parsed incorrectly, made in advance
df.loc[df.Verfahrensnummer == "6B_479/2020", "Verfahrensergebnis"] = "Gutheissung"
rv_count = df.groupby("Rechtsvertreter").Verfahrensnummer.count().reset_index()
rv_count = rv_count.rename(columns={"Verfahrensnummer": "tot"})
rv_count = rv_count.sort_values("tot", ascending=False)
# filter out unrepresented and anonymized parties
rv_count = rv_count[rv_count.Rechtsvertreter != "unvertreten"]
rv_count = rv_count[rv_count.Rechtsvertreter != "A.________"]
# filter out single mentions
rv_count = rv_count[rv_count.tot > 1]
rv_gut_count = df[
    df.Verfahrensergebnis.str.contains(
        "teilweise Gutheissung|Gutheissung", regex=True
    )
]
rv_gut_count = (
rv_gut_count.groupby("Rechtsvertreter").Verfahrensnummer.count().reset_index()
)
rv_gut_count = rv_gut_count.rename(columns={"Verfahrensnummer": "tot_gut"})
rv_gut_count = rv_gut_count.sort_values("tot_gut", ascending=False)
rv_gut_count = rv_gut_count[rv_gut_count.Rechtsvertreter != "unvertreten"]
rv_gut_count = rv_gut_count[rv_gut_count.tot_gut > 1]
rv_gut_count = pd.merge(rv_count, rv_gut_count, how="outer")
rv_gut_count = rv_gut_count.fillna(0)
rv_gut_count["%"] = rv_gut_count.tot_gut / rv_gut_count.tot * 100
rv_gut_count = rv_gut_count.sort_values(["%", "tot_gut"], ascending=False)
# hide the unsuccessful ones
rv_gut_count = rv_gut_count[rv_gut_count.tot_gut > 0]
# hide everyone with fewer than 9 appeals (at least 1 appeal per year on average)
rv_gut_count = rv_gut_count[rv_gut_count.tot >= 9]
rv_gut_count = rv_gut_count.reset_index(drop=True)
# cut top20
rv_gut_count = rv_gut_count.head(20)
rv_gut_count = rv_gut_count.rename(
columns={"tot": "Total Beschwerden", "tot_gut": "Total Gutheissungen"}
)
rv_gut_count.style.background_gradient(subset=["%"], cmap="BuGn")
# Jean-Pierre Garbade
# list of the successful cases
platz1 = df[
    df.Rechtsvertreter.str.contains("Jean-Pierre Garbade")
    & df.Verfahrensergebnis.str.contains(
        "teilweise Gutheissung|Gutheissung", regex=True
    )
]
platz1 = platz1.sort_values("Verfahrensergebnis", ascending=False)
platz1.style
# Daniel Kinzer, avocat
# list of the successful cases
platz3 = df[
    df.Rechtsvertreter.str.contains("Daniel Kinzer")
    & df.Verfahrensergebnis.str.contains(
        "Gutheissung|teilweise Gutheissung", regex=True
    )
]
platz3 = platz3.sort_values("Verfahrensergebnis", ascending=False)
platz3.style
|
import random
from collections import defaultdict
from kaggle_environments.envs.hungry_geese.hungry_geese import (
Observation,
Configuration,
Action,
row_col,
translate,
)
# # Helper functions
# Mostly one-liners for getting info out of the observation data structure, to help keep subsequent code readable and less error-prone.
def my_snake(obs):
return obs.geese[obs.index]
def my_head(obs):
return my_snake(obs)[0]
def translate_my_head(config, obs, action):
return translate(
position=my_head(obs),
direction=action,
columns=config.columns,
rows=config.rows,
)
def step(obs):
# obs.step starts counting at 0; "step number" (for the purpose of hunger and episodeSteps) seems to start counting at 1.
return obs.step + 1
def action_eats_food(config, obs, action):
return translate_my_head(config, obs, action) in obs.food
# # State tracking globals
# These are necessary to track important game state which is not provided as part of the Observation object. They are (somewhat naively) updated every time that the agent function is called.
prev_action = None
# # Q-functions
# Evaluate various qualities of taking a given action at a given game state.
# (*relative change in value* is what matters: being closer to food is better. How much better?)
# tries to predict, based on features: "what can your reward ~~eventually be~~ increase by in the future if you choose this move?"
# "How will my future reward be affected by this action?"
# "How much *more* reward might you be able to get (in addition to what you have so far)?"
# Will my length change due to this action?
def delta_length(config, obs, action):
l = 0
if action_eats_food(config, obs, action):
# I would eat food if I took this action
l += 1
if step(obs) % config.hunger_rate == 0:
# I would lose length due to hunger this turn
l -= 1
return l
# will I crash and die immediately? If I do, how many steps would I have missed out on?
def collision_based_ttl(config, obs, action):
global prev_action
# It may be a good idea to try to cache and/or reuse all_goose_loc across q functions if performance is an issue
all_goose_loc = set([position for goose in obs.geese for position in goose])
if (
translate_my_head(config, obs, action) in all_goose_loc
) or action.opposite() == prev_action:
return 1 # config['episodeSteps'] - (step(obs)+1) # I would die on the very next step, so I would "miss out on" this many steps
# WHY doesn't Configuration object have attribute episodeSteps when the dict has it????
return config["episodeSteps"] - step(
obs
) # optimistic - we won't ever collide, don't worry!
# If I didn't eat starting now, how long would I last before I die of hunger?
def hunger_ttl(config, obs, action):
snake_len = len(my_snake(obs))
snake_len += delta_length(
config, obs, action
) # what WILL my length be after this action?
last_hunger_point = (step(obs) // config.hunger_rate) * config.hunger_rate
time_die_of_hunger = last_hunger_point + snake_len * config.hunger_rate
return min(config["episodeSteps"], time_die_of_hunger) - step(
obs
) # or will I not die of hunger before the game is over?
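# Worked example of the arithmetic above (hunger_rate=40 and episodeSteps=200 are assumed,
# matching the usual hungry_geese defaults): at step 85 with a post-action length of 3,
# last_hunger_point = (85 // 40) * 40 = 80, time_die_of_hunger = 80 + 3 * 40 = 200,
# so hunger_ttl = min(200, 200) - 85 = 115.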
# combine hunger_ttl and collision_based_ttl: what might cause me to die in the foreseeable future?
def min_ttl(config, obs, action):
return min(
hunger_ttl(config, obs, action), collision_based_ttl(config, obs, action)
)
# flood-fill distance to nearest available food
# (including possibility of distance=0, meaning "you would eat food next turn if you took this action")
def nearest_food_dist(config, obs, action):
all_goose_loc = set([position for goose in obs.geese for position in goose])
food_loc = set(obs.food)
max_dist = config.columns * config.rows
next_head = translate_my_head(config, obs, action)
"""if next_head in all_goose_loc or action.opposite()==prev_action:
return max_dist"""
processed_cells = set()
to_process = [(next_head, 0)]
while len(to_process) > 0:
loc, dist = to_process.pop(0)
if loc not in processed_cells:
processed_cells.add(loc)
if loc in all_goose_loc:
# going here would crash the goose and (probably) not actually eat any food present.
# ignore this location and keep searching.
continue
if loc in food_loc:
# Food here! return the distance
return dist
else:
# no food here - where can we go from here?
next_dist = dist + 1
for next_a in Action:
next_loc = translate(
loc, next_a, columns=config.columns, rows=config.rows
)
# if next_loc not in all_goose_loc:
to_process.append((next_loc, next_dist))
# ran out of potential cells to process and did not find accessible food - return dummy value
return max_dist
# How much space can I reach *before* any other goose does?
# counting space which will definitely clear out by the time I get there
def uncontested_space(config, obs, action):
# Enumerate all spaces taken up by geese, and when they will clear out
goose_parts = {}
for goose in obs.geese:
gl = len(goose)
for i, position in enumerate(goose):
tt_leave = gl - i # ranges from 1 (tail) to goose length (head)
# avoid following tail directly, in case the goose eats food (?)
goose_parts[position] = tt_leave
# If I would crash by taking this action, I have 0 space.
next_head = translate_my_head(config, obs, action)
if (
next_head in goose_parts and goose_parts[next_head] > 0
) or action.opposite() == prev_action:
return 0
# flood-fill from all geese at once; but keeping my goose separate, going last
# (because it's actually ahead after taking the action in question)
# track (location, time to get there) tuples for valid locations for a goose to go
other_to_process = [
(g[0], 0) for i, g in enumerate(obs.geese) if (i != obs.index and len(g) > 0)
]
me_to_process = [(next_head, 1)]
me_uncontested = set([next_head])
# spaces which are already 'claimed' - not uncontested
claimed = set([pos for pos, dist in other_to_process])
claimed.add(next_head)
other_next_step = []
me_next_step = []
while len(me_to_process) > 0: # we care only about fully flood-filling *my* space
# other geese take next step(s)
for other_loc, other_step in other_to_process:
for a in Action:
next_loc = translate(
other_loc, a, columns=config.columns, rows=config.rows
)
                # can it go there uncontested?
if (next_loc not in claimed) and (
(next_loc not in goose_parts)
or (goose_parts[next_loc] <= other_step)
):
claimed.add(next_loc)
other_next_step.append((next_loc, other_step + 1))
# my goose takes next step(s)
for my_loc, my_step in me_to_process:
for a in Action:
next_loc = translate(
my_loc, a, columns=config.columns, rows=config.rows
)
if (next_loc not in claimed) and (
(next_loc not in goose_parts) or (goose_parts[next_loc] <= my_step)
):
claimed.add(next_loc)
me_next_step.append((next_loc, my_step + 1))
me_uncontested.add(next_loc)
# swap in new to_process lists
other_to_process = other_next_step
me_to_process = me_next_step
other_next_step = []
me_next_step = []
return len(me_uncontested)
# What's my chance of colliding with someone next turn (assuming geese move randomly to a not-currently-occupied spot)?
# factors in both unavoidable collisions(chance=1) and head-on collisions which depend on where the other goose goes
def chance_to_collide(config, obs, action):
next_head = translate_my_head(config, obs, action)
goose_parts = {}
for goose in obs.geese:
gl = len(goose)
for i, position in enumerate(goose):
tt_leave = gl - i # ranges from 1 (tail) to goose length (head)
# avoid following tail directly, in case the goose eats food (?)
goose_parts[position] = tt_leave
other_heads = [
g[0] for i, g in enumerate(obs.geese) if (i != obs.index and len(g) > 0)
]
# if I am walking right into somebody (including me), the chance is 1.
if (
next_head in goose_parts and goose_parts[next_head] > 0
) or action.opposite() == prev_action:
return 1
headon_chances = []
for h in other_heads:
total_options = 0
head_on = 0
for a in Action:
next_loc = translate(h, a, columns=config.columns, rows=config.rows)
if next_loc not in goose_parts or (
goose_parts[next_loc] <= 0
): # goose may actually go there
total_options += 1
if next_loc == next_head:
head_on += 1
if total_options > 0: # maybe that goose is in a dead end
headon_chances.append(head_on / total_options)
if len(headon_chances) == 0: # maybe nobody has viable moves
return 0
return max(headon_chances)
# A very special q function to account for the fact that (for some reason) we gain an extra 100 points on the very first step
def is_first_step(config, obs, action):
return 1 if step(obs) == 1 else 0
# A function which returns a constant (like an intercept for linear regression)
def one(config, obs, action):
return 1
# ## Initial guess at weights
q_weights = {
delta_length: 1,
hunger_ttl: 80,
uncontested_space: 20,
nearest_food_dist: -1,
is_first_step: 100,
chance_to_collide: -400,
one: 1,
}
# # Agent
# The agent's logic is relatively simple: for each possible action, evaluate each q-function and add them up with appropriate weights. Then choose the action that gives you the biggest number.
# But we need to split it out across several functions because we will need the intermediate results during training.
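# (In symbols, the score used below is a linear value estimate Q(s, a) = sum_i w_i * q_i(s, a),
# where the q_i are the functions in q_weights and the w_i are their weights.)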
# Given a state (defined by observation and configuration objects) and an action,
# evaluate individual q-function components of q-value
def evaluate_q_function(obs, config, action):
global q_weights
qf_to_value = {}
for q_func in q_weights:
qf_to_value[q_func] = q_func(config, obs, action)
return qf_to_value
# Given a state (defined by observation and configuration objects)
# Return what each q-function would evaluate to for *each* potential action we could take from this state.
def evaluate_q_functions(obs, config):
action_to_qfs = {}
for potential_action in Action:
action_to_qfs[potential_action] = evaluate_q_function(
obs, config, potential_action
)
return action_to_qfs
# Given data about a single q-function evaluation, as returned from evaluate_q_function,
# return the actual Q value of taking that action
def get_q_value(action_qf_values):
q_value = 0
for q_func in q_weights:
q_value += q_weights[q_func] * action_qf_values[q_func]
return q_value
# Given data about q-function evaluations (as returned by evaluate_q_functions) above,
# return the best action to take, and what its q-value would be
def get_best_q_value(action_to_qfs):
global q_weights
options = []
for potential_action in action_to_qfs:
q_value = get_q_value(action_to_qfs[potential_action])
options.append((potential_action, q_value))
return max(options, key=lambda x: x[1])
# Commit to taking this particular action by updating global state (D:) as if it was already taken
def update_state(action_taken):
    global prev_action
    prev_action = action_taken
def agent(obs_dict, config_dict):
obs = Observation(obs_dict)
config = Configuration(config_dict)
action_to_qfs = evaluate_q_functions(obs, config)
best_action, best_Q_value = get_best_q_value(action_to_qfs)
update_state(best_action)
return best_action.name
# ## Test run
# the %%writefile magic command writes the given code to a file, but does *not* run it.
# In order to use the code now, we actually run the file we wrote previously.
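# (For reference, a rough sketch of that pattern; the cell magics are shown as comments because
# they only work at the top of a notebook cell, and "submission.py" is an assumed filename:
#     %%writefile submission.py
#     ... agent code ...
# and then, in a later cell:
#     %run submission.py
# )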
from kaggle_environments import make
prev_action = None # reset state
env = make("hungry_geese") # , debug=True)
run = env.run(["greedy", "greedy", "greedy", agent])
env.render(mode="ipython", width=500, height=500)
print("our agent is the red goose")
print("scores:")
print([x["reward"] for x in run[-1]])
# # Training
# ## Learn while playing
# reset q_weights to be 1 or -1 (keep the sign, lose the value) to train from the ground up
# NOTE: because of the write-to-file-and-then-import scheme,
# the variable q_weights has surprising scope, and thus can't be reassigned directly:
# if you try saying q_weights = {...}, it will *shadow* the original q_weights instead of changing it,
# meaning that the agent functions will use the original q_weights, but the training code will be changing the new version.
for qf in q_weights:
q_weights[qf] = q_weights[qf] / abs(q_weights[qf])
print(q_weights)
trainer = env.train(["greedy", "greedy", "greedy", None])
my_index = 3
obs_dict = trainer.reset()
config_dict = env.configuration
config = Configuration(config_dict)
discount_factor = 1 # 1 = no discount
learning_rate = 0.01 # effective learning rate should decrease as you go (weigh the recent experiences in proportion to history)
learn_count = 500  # I decrease the effective learning rate by increasing learn_count and dividing by it.
chance_to_random = 0.001 # chance to move randomly instead of using the "optimal" move suggested by the q-functions
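# (So the k-th weight update uses an effective step size of learning_rate / learn_count,
# i.e. roughly 0.01 / (500 + k): later experiences are weighted in proportion to the
# history already seen.)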
# track learning
learned_values = defaultdict(list)
place_record = []
for i in range(10000):
# Replicate the logic in agent() but get the intermediate values and also act randomly with some chance.
obs = Observation(obs_dict)
action_to_qfs = evaluate_q_functions(obs, config)
chosen_action, Q_value = get_best_q_value(action_to_qfs)
if random.random() < chance_to_random:
chosen_action = random.choice(list(Action))
Q_value = get_q_value(action_to_qfs[chosen_action])
q_func_values = action_to_qfs[chosen_action]
update_state(chosen_action)
# pass the chosen action to the trainer in order to get next observation and reward info
obs_dict, reward, done, info = trainer.step(chosen_action.name)
if done:
my_score = env.state[my_index]["reward"]
num_better = sum([g["reward"] > my_score for g in env.state])
print(
f"your goose died on step {env.state[0]['observation']['step']+1} (placed: {num_better+1}; scores:{[g['reward'] for g in env.state]})"
)
print()
place_record.append(num_better + 1)
# calculate next state's best q-value
next_best_Q_value = 0 # if game is over no more reward is forthcoming.
if not done:
next_obs = Observation(obs_dict)
next_action_to_qfs = evaluate_q_functions(next_obs, config)
next_best_action, next_best_Q_value = get_best_q_value(next_action_to_qfs)
# Learn from the experience,
# UNLESS everybody is done (game is over) and we are first, which means that we won.
# in that case, we don't have good ground truth about the score we COULD get.
my_score = env.state[my_index]["reward"]
won = all([g["status"] == "DONE" for g in env.state]) and (
sum([g["reward"] > my_score for g in env.state]) == 0
)
if not won:
# what is the difference in reward prediction between:
        # (1) our original estimate (Q_value)
# (2) a more 'advanced' prediction based on our new knowledge of reward and new resulting state
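        # (This is the standard TD update for a linear function approximator:
        #    diff = r + gamma * max_a' Q(s', a') - Q(s, a)
        #    w_i <- w_i + alpha * diff * q_i(s, a)
        #  with gamma = discount_factor and alpha = learning_rate / learn_count.)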
diff = (reward + discount_factor * next_best_Q_value) - Q_value
learned_values["diff"].append(diff)
        # update weights based on this calculated difference
        # ("blame" the inaccuracy on the individual weights)
for q_func in q_weights:
q_weights[q_func] = (
q_weights[q_func]
+ (learning_rate / learn_count) * diff * q_func_values[q_func]
)
learned_values[q_func.__name__].append(q_weights[q_func])
# decrease learning rate
learn_count += 1.0
# Reset trainer if we've died
if done:
obs_dict = trainer.reset()
print("after training:\n")
print("q_weights={")
for q in q_weights:
print(f" {q.__name__}: {q_weights[q]},")
print("}")
import matplotlib.pyplot as plt
plt.subplots(figsize=(15, 3))
plt.plot(range(len(learned_values["diff"])), learned_values["diff"])
plt.show()
plt.plot(range(len(learned_values["hunger_ttl"])), learned_values["hunger_ttl"])
plt.plot(
range(len(learned_values["uncontested_space"])), learned_values["uncontested_space"]
)
plt.plot(
range(len(learned_values["nearest_food_dist"])), learned_values["nearest_food_dist"]
)
plt.plot(range(len(learned_values["delta_length"])), learned_values["delta_length"])
plt.plot(
range(len(learned_values["chance_to_collide"])), learned_values["chance_to_collide"]
)
plt.plot(range(len(learned_values["one"])), learned_values["one"])
import pandas as pd
pd.Series(place_record).rolling(50).mean().plot()
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
# PassengerIds from the test set
test_passengerIDs = pd.read_csv("../input/titanic-datasets/test.csv")["PassengerId"]
test = pd.read_csv("../input/titanic-datasets/test.csv")
train = pd.read_csv("../input/titanic-datasets/train.csv")
# helper func to submit predictions easily
# takes one parameter: the series/dataframe of your predictions
def submit_predictions(predictions):
submission = pd.DataFrame(
{"PassengerId": test_passengerIDs, "Survived": predictions}
)
submission.to_csv("submission.csv", index=False)
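# a quick usage sketch (the all-zeros vector below is just a hypothetical placeholder prediction):
# submit_predictions(np.zeros(len(test_passengerIDs), dtype=int))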
test_passengerIDs.head()
# These are the original Titanic competition datasets, no changes
train_orig = pd.read_csv("../input/titanic-datasets/train.csv")
test_orig = pd.read_csv("../input/titanic-datasets/test.csv")
train_orig.head()
test_orig.head()
pred_df = pd.DataFrame()
# # Arjun - Random Forest
# ## Setup
train_random_forest = pd.read_csv("../input/teammatetitanic/train_random_forest.csv")
test_random_forest = pd.read_csv("../input/teammatetitanic/test_random_forest.csv")
train_random_forest.head()
test_random_forest.head()
# ## Training
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
X_random_forest = train_random_forest.drop("Survived", axis=1)
y_random_forest = train_random_forest["Survived"]
RFC = RandomForestClassifier(
n_estimators=30, criterion="gini", max_depth=3, random_state=0
)
# using K-Fold Split
kf = KFold(n_splits=3)
kf.get_n_splits(X_random_forest)
scores = []
for train_index, valid_index in kf.split(X_random_forest):
X_train, X_test, y_train, y_test = (
X_random_forest.iloc[train_index],
X_random_forest.iloc[valid_index],
y_random_forest.iloc[train_index],
y_random_forest.iloc[valid_index],
)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
RFC.fit(X_train, y_train)
predictions = RFC.predict(X_test)
predictions = (predictions > 0.5).astype(int)
print(predictions)
scores.append((predictions == y_test).sum() / len(y_test))
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
X_train_rfc, X_test_rfc, y_train_rfc, y_test_rfc = train_test_split(
X_random_forest, y_random_forest, test_size=0.5, random_state=101
)
rfc_X_preds = RFC.predict(X_test_rfc)
pred_df["rfc"] = rfc_X_preds
pred_df["Survived"] = y_test_rfc
pred_df.info()
print(pd.Series(scores).describe())
print(pd.Series(scores))
# ## Predict
# Scale test data first
scaler = StandardScaler()
scaler.fit(test_random_forest)
test_random_forest_scaled = scaler.transform(test_random_forest)
print(test_random_forest_scaled)
pred_random_forest = RFC.predict(test_random_forest_scaled)
pred_random_forest
# # Richard - SVM
# ## Setup
from sklearn.model_selection import GridSearchCV
train_svm = pd.read_csv("../input/teammatetitanic/train_svm.csv")
test_svm = pd.read_csv("../input/teammatetitanic/test_svm.csv")
train_svm.head()
test_svm.head()
X_train_svm_columns = [
"Age",
"Fare",
"Cabin",
"Title",
"Has_Cabin",
"Family_Size",
"Is_Alone",
"Age_bin",
"Fare_bin",
"Tick_Len",
"Pclass_Frequency",
"Embarked_Q",
"Embarked_S",
"Male",
]
# ## Training
from sklearn.preprocessing import scale
train_svm.columns != "Survived"
X_train_svm, X_test_svm, y_train_svm, y_test_svm = train_test_split(
train_svm[X_train_svm_columns],
train_svm["Survived"],
test_size=0.5,
random_state=101,
)
X_test_svm.head()
# Important to scale the X data;
# otherwise GridSearchCV and BayesOptCV
# don't work well (SVC is sensitive to feature scale)
X_train_svm_scaled = scale(X_train_svm)
X_test_svm_scaled = scale(X_test_svm)
from sklearn.svm import SVC
gridsearch_paramGrid = [
{"C": [1, 10], "kernel": ["linear"]},
{"C": [100, 1000, 10000], "gamma": [0.1, 0.01, 0.001], "kernel": ["rbf"]},
]
db_gridsearch_svc = GridSearchCV(
SVC(), param_grid=gridsearch_paramGrid, scoring="accuracy", n_jobs=-1, verbose=5
)
# Will take 2-3 seconds
db_gridsearch_svc.fit(X_train_svm_scaled, y_train_svm)
# check the best parameters out of curiosity
db_gridsearch_svc.best_params_
# Evaluate predictions on the held-out split
gridsearch_svc_pred = db_gridsearch_svc.predict(X_test_svm_scaled)
print(classification_report(y_test_svm, gridsearch_svc_pred))
print(confusion_matrix(y_test_svm, gridsearch_svc_pred))
# ## Predict
test_svm_scaled = scale(test_svm)
test_svm_scaled
pred_svc = db_gridsearch_svc.predict(test_svm_scaled)
pred_svc
pred_df["svc"] = db_gridsearch_svc.predict(scale(X_test_svm))
pred_df["Survived"] = y_test_svm
y_test_svm
pred_df.tail()
# # Joseph - Neural Network
# ## Setup
# Imports
from sklearn.tree import DecisionTreeClassifier
from sklearn.impute import SimpleImputer
from sklearn import tree
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.utils.vis_utils import plot_model
from keras.optimizers import SGD
from keras.optimizers import RMSprop
from keras.optimizers import Adagrad
from tensorflow import keras
from tensorflow.keras import layers
from kerastuner.tuners import RandomSearch
from numpy.random import randn
from keras.regularizers import l1
from keras.regularizers import l2
import tensorflow as tf
import pathlib
import shutil
import tempfile
from kerastuner.tuners import Hyperband
import kerastuner as kt
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from keras.callbacks import EarlyStopping
from keras.layers import GaussianNoise
### I commented out the below imports because I was getting errors
# !pip install git+https://github.com/tensorflow/docs
# import tensorflow_docs as tfdocs
# import tensorflow_docs.modeling
# import tensorflow_docs.plots
# !pip install ann_visualizer
# from ann_visualizer.visualize import ann_viz
train_neural_network = pd.read_csv("../input/teammatetitanic/train_neural_network.csv")
test_neural_network = pd.read_csv("../input/teammatetitanic/test_neural_network.csv")
train_neural_network.head()
test_neural_network.head()
print("Train Shape: ", train_neural_network.shape)
print("Test Shape: ", test_neural_network.shape)
combined_train_test = [train_neural_network, test_neural_network]
for df in combined_train_test:
df["Sex"].replace({"male": 0, "female": 1}, inplace=True)
df["Embarked"].replace({"S": 0, "C": 1, "Q": 2}, inplace=True)
# ## Training
X_neural_network, y_neural_network = (
train_neural_network.loc[:, train_neural_network.columns != "Survived"],
train_neural_network["Survived"],
)
print(X_neural_network)
print(len(X_neural_network) + len(y_neural_network))
(
X_train_neural_network,
X_test_neural_network,
y_train_neural_network,
y_test_neural_network,
) = train_test_split(
X_neural_network, y_neural_network, test_size=0.5, random_state=101
)
X_train_neural_network.shape, X_test_neural_network.shape
clf = DecisionTreeClassifier(criterion="gini", max_depth=3, random_state=0)
# Fit the model
clf.fit(X_train_neural_network, y_train_neural_network)
# Get various accuracies
print(
"Accuracy on training set: {:.3f}".format(
clf.score(X_train_neural_network, y_train_neural_network)
)
)
print(
"Accuracy on test set: {:.3f}".format(
clf.score(X_test_neural_network, y_test_neural_network)
)
)
pred_neural_network_2 = clf.predict(test_neural_network)
pred_neural_network_2 = (pred_neural_network_2 > 0.5).astype(int)
pred_neural_network_2 = pred_neural_network_2.flatten()
pred_df["nn"] = clf.predict(X_test_neural_network)
pred_df["Survived"] = y_test_neural_network
pred_df.tail()
text_representation = tree.export_text(clf)
print(text_representation)
# Adding dropout layers to randomly drop nodes during training and reduce model complexity
model = keras.Sequential()
model.add(GaussianNoise(0.01, input_shape=(13,)))
# model.add(Dense(13, input_dim=13, activation='relu', activity_regularizer=l2(1e-4))) # Hidden 1
model.add(Dense(13, input_dim=13, activation="relu", kernel_regularizer=l2(0.001)))
model.add(keras.layers.Dropout(0.3))
model.add(Dense(6, activation="relu", activity_regularizer=l2(1e-4))) # Hidden 2
model.add(keras.layers.Dropout(0.3))
# model.add(Dense(3, activation='relu', activity_regularizer=l2(1e-4))) # Hidden 3
model.add(Dense(3, activation="relu", kernel_regularizer=l2(0.001)))
model.add(keras.layers.Dropout(0.3))
model.add(Dense(1, activation="sigmoid")) # Output
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# es = EarlyStopping(monitor='val_loss', mode='min', verbose=1)
model.fit(X_train_neural_network, y_train_neural_network, epochs=100, batch_size=8)
# #### What worked for reducing overfitting:
# #### 1. Activity regularization (l2 seemed better)
# #### 2. Weight regularization (l2 as well)
# #### 3. Dropout layers
# #### 4. Adding statistical noise during training
keras_scores = model.evaluate(X_test_neural_network, y_test_neural_network)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
print(pd.Series(scores).describe())
y_pred_neural_network = model.predict(X_test_neural_network).reshape(-1)
print(y_pred_neural_network[:10])
# round to 0 or 1
y_pred_neural_network = np.round(y_pred_neural_network)
print(y_pred_neural_network[:10])
y_test_neural_network[:10]
print(classification_report(y_test_neural_network, y_pred_neural_network))
plot_model(model, to_file="titanic_model.png", show_shapes=True, show_layer_names=True)
# ## Predict
predictions_neural_network = model.predict(test_neural_network)
predictions_neural_network = (predictions_neural_network > 0.5).astype(int)
predictions_neural_network = predictions_neural_network.flatten()
print(predictions_neural_network)
# # Michael - LightGBM
# ## Setup
from sklearn import metrics
import lightgbm as lgb # if you don't have it downloaded, do pip install lightgbm
train_lgbm = pd.read_csv("../input/teammatetitanic/train_lgbm.csv")
test_lgbm = pd.read_csv("../input/teammatetitanic/test_lgbm.csv")
train_lgbm.head()
test_lgbm.head()
X_lgbm = train_lgbm.drop("Survived", axis=1)
y_lgbm = train_lgbm["Survived"]
cate_features_name = [
"Pclass",
"Embarked",
"Title",
"Sex",
"Cabin",
"Has_Cabin",
"Is_Alone",
]
# ## Training
# ### For Full Dataset Submission
# Note, you'll need to create a new lgb.Dataset with new X and y values (will likely be X_train and y_train) whenever you want to train for stacking or within a KFold; see the sketch after the baseline training below.
params = {
"boosting_type": "gbdt",
"max_depth": -1,
"objective": "binary",
"nthread": 3,
"num_leaves": 64,
"learning_rate": 0.05,
"max_bin": 512,
"subsample_for_bin": 200,
"subsample": 1,
"subsample_freq": 1,
"colsample_bytree": 0.8,
"reg_alpha": 5,
"reg_lambda": 10,
"min_split_gain": 0.5,
"min_child_weight": 1,
"min_child_samples": 5,
"scale_pos_weight": 1,
"num_class": 1,
"metric": "binary_error",
"num_boost_rounds": 87,
}
X_train_lgbm, X_test_lgbm, y_train_lgbm, y_test_lgbm = train_test_split(
X_lgbm, y_lgbm, test_size=0.5, random_state=101
)
lgb_full_train = lgb.Dataset(
X_train_lgbm,
y_train_lgbm,
categorical_feature=cate_features_name,
free_raw_data=False,
)
submission_model_lgbm = lgb.train(
params,
lgb_full_train,
valid_sets=None,
verbose_eval=10,
)
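# A minimal sketch of the KFold variant mentioned in the note above, re-creating the
# lgb.Dataset from each fold's training slice (the fold count, shuffling and the 0.5
# probability threshold are assumptions, not tuned choices):
from sklearn.model_selection import KFold
fold_scores = []
for tr_idx, va_idx in KFold(n_splits=5, shuffle=True, random_state=101).split(X_lgbm):
    fold_train = lgb.Dataset(
        X_lgbm.iloc[tr_idx],
        y_lgbm.iloc[tr_idx],
        categorical_feature=cate_features_name,
        free_raw_data=False,
    )
    fold_model = lgb.train(params, fold_train)
    fold_pred = (fold_model.predict(X_lgbm.iloc[va_idx]) > 0.5).astype(int)
    fold_scores.append((fold_pred == y_lgbm.iloc[va_idx].values).mean())
print("KFold accuracy (sketch):", np.mean(fold_scores))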
# ## Predict
# converts decimals into 0 and 1 values.
pred_lgbm = submission_model_lgbm.predict(X_test_lgbm)
pred_lgbm = (pred_lgbm > 0.5).astype(int)
# pred_df['pred_lgbm'] = submission_model_lgbm.predict(X_test_lgbm)
# pred_df['Survived'] = y_test_lgbm
pred_df["lgb"] = pred_lgbm
pred_df["Survived"] = y_test_lgbm
pred_df.head()
# # **Saurav Kumar**
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("../input/titanic-datasets/train_features_generated.csv")
df = df.replace({"Embarked": {"S": 1, "C": 2, "Q": 3}})
df.head()
df = df.drop(["Fare"], axis=1).drop(["Age"], axis=1).drop(["Tick_Len"], axis=1)
dfDropped = df[["Sex", "Pclass", "Age_bin", "Embarked", "Is_Alone"]].copy()
dfDropped.head()
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
set = {"male": 1, "female": 2}
df = df.replace({"Sex": set})
dfDropped = dfDropped.replace({"Sex": set})
df.tail()
df.info()
# dfDropped = df.drop(['Survived'], axis=1).drop('Is_Alone',axis=1).drop('Fare_bin',axis=1).drop('Title',axis=1).drop('Cabin',axis=1)
# .drop('Pclass',axis=1).drop('Fare',axis=1).drop('Has_Cabin',axis=1).drop('Title',axis=1).drop('Family_Size',axis=1).drop('Is_Alone',axis=1).drop('Tick_Len',axis=1).drop('Age_bin',axis=1).drop('Fare_bin',axis=1).drop('Survived',axis=1).drop('Cabin',axis=1).drop('Embarked',axis=1)
dfDropped.head()
scaler.fit(dfDropped)
scaled_features = scaler.transform(dfDropped)
scaled_features
df_without_columns = dfDropped
df_without_columns.columns
df_feat = pd.DataFrame(scaled_features, columns=df_without_columns.columns)
df_feat.head()
from sklearn.model_selection import train_test_split
X = df_feat
y = df["Survived"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=101
)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
pred
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))
error_rate = []
for i in range(1, 100):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred_i = knn.predict(X_test)
error_rate.append(np.mean(pred_i != y_test))
plt.figure(figsize=(10, 6))
plt.plot(
range(1, 100),
error_rate,
color="blue",
linestyle="dashed",
marker="o",
markerfacecolor="red",
markersize=10,
)
plt.title("Error Rate vs. K Value")
plt.xlabel("K")
plt.ylabel("Error Rate")
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
print("\n")
print(confusion_matrix(y_test, pred))
print("\n")
print(classification_report(y_test, pred))
pred_df["knn"] = pred
pred_df["Survived"] = y_test
survived_df = pd.DataFrame({"Survived": y_test})
pred_df["Survived"] = survived_df["Survived"]
testDf = pd.read_csv("../input/titanic-datasets/test_features_generated.csv")
testDf.head()
testDfReal = pd.read_csv("../input/titanic-datasets/test.csv")
testDf = testDf.replace({"Sex": {"male": 1, "female": 2}})
testDf = testDf.replace({"Embarked": {"S": 1, "C": 2, "Q": 3}})
scaler2 = StandardScaler()
testDfDropped = testDf[["Sex", "Pclass", "Age_bin", "Embarked", "Is_Alone"]].copy()
testDfDropped.head()
scaler2.fit(testDfDropped)
testDfDropped
scaled_features2 = scaler2.transform(testDfDropped)
df_without_columns2 = testDfDropped
df_without_columns2.columns
df_feat2 = pd.DataFrame(scaled_features2, columns=df_without_columns2.columns)
df_feat2.head()
testDf
knn = KNeighborsClassifier(n_neighbors=20)
knn.fit(X_train, y_train)
pred_knn = knn.predict(df_feat2)
pred_knn
# ## Ensembling (Stacking)
pred_df = pred_df.drop("Survived", axis=1)
pred_df.head()
survived_df.head()
from sklearn.linear_model import LogisticRegression
meta_model = LogisticRegression()
meta_model.fit(pred_df, survived_df["Survived"])
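# Purely diagnostic (not part of the original pipeline): peek at the learned blend to see
# which base model the meta-model leans on most.
print(dict(zip(pred_df.columns, meta_model.coef_[0])))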
# # Fitting Meta-Model to the Test Set
pred_df
# First, we'll create the data frame for the model predictions that matches pred_df
test_combined_pred = pd.DataFrame()
test_combined_pred["rfc"] = pd.read_csv(
"../input/teammatetitanic/results_random_forest.csv"
)["Survived"]
test_combined_pred["svc"] = pd.read_csv("../input/teammatetitanic/results_svm.csv")[
"Survived"
]
test_combined_pred["nn"] = pd.read_csv(
"../input/teammatetitanic/results_neural_network.csv"
)["Survived"]
test_combined_pred["lgb"] = pd.read_csv("../input/teammatetitanic/results_lgbm.csv")[
"Survived"
]
test_combined_pred["knn"] = pd.read_csv("../input/teammatetitanic/sauravknntest.csv")[
"Survived"
]
test_combined_pred
meta_pred = meta_model.predict(test_combined_pred)
test = pd.read_csv("../input/titanic-datasets/test.csv")
submission = pd.DataFrame()
submission["PassengerId"] = test["PassengerId"]
submission["Survived"] = meta_pred
submission.to_csv("stacking_v1.csv", index=False)
submission.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/housesalesprediction/kc_house_data.csv")
df.drop(columns=["date", "zipcode", "waterfront", "id", "view"], inplace=True)
df.drop(columns=["sqft_living15", "sqft_lot15", "lat", "long"], inplace=True)
df.drop(columns=["yr_renovated"], inplace=True)
df1 = pd.read_csv("/kaggle/input/housesalesprediction/kc_house_data.csv")
df.head()
df.columns
# use the columns that remain in df after the drops above (the target "price" is excluded)
features = [
    "bedrooms",
    "bathrooms",
    "sqft_living",
    "sqft_lot",
    "floors",
    "condition",
    "grade",
    "sqft_above",
    "sqft_basement",
    "yr_built",
]
y = df["price"]
from sklearn.model_selection import cross_val_score, train_test_split
X_train, X_test, y_train, y_test = train_test_split(df[features], y, test_size=0.3)
X_train.shape
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
baseline_pred = lin_reg.predict(X_test)
from sklearn.metrics import mean_squared_error
lin_mse = mean_squared_error(y_test, baseline_pred)
lin_reg.score(X_test, y_test)
print(lin_mse)
import xgboost
xgb = xgboost.XGBRegressor(
n_estimators=100,
learning_rate=0.08,
gamma=0,
subsample=0.75,
colsample_bytree=1,
max_depth=7,
)
from sklearn.ensemble import RandomForestRegressor
rnd_clf = RandomForestRegressor()  # price is continuous, so use a regressor rather than a classifier
rnd_clf.fit(X_train, y_train)
X_train, X_test, y_train, y_test = train_test_split(df[features], y, test_size=0.3)
xgb.fit(X_train, y_train)
from sklearn.metrics import explained_variance_score
predictions = xgb.predict(X_test)
print(explained_variance_score(y_test, predictions))
score_xgb = xgb.score(X_test, y_test)
print(score_xgb)
import seaborn as sns
sns.countplot(data=df, x="bedrooms")
sns.histplot(df["price"])
sns.scatterplot(x=df["bedrooms"], y=df["price"])
sns.scatterplot(x=df["bathrooms"], y=df["price"])
sns.scatterplot(x=df["sqft_lot"], y=df["price"])
sns.scatterplot(x=df1["zipcode"], y=df1["price"])  # zipcode was dropped from df above, so use the untouched df1
df1["zipcode"].nunique()
from category_encoders import TargetEncoder
encoder = TargetEncoder()
df["zipcode_Encoded"] = encoder.fit_transform(df["zipcode"], df["price"])
df_corr = df.corr()
import seaborn as sns
sns.heatmap(df_corr, annot=True, annot_kws={"size": 15})
print(df_corr.T)
|
# QUICK WORKAROUND FOR PROBLEM WITH PANDAS
import os
import time
from tqdm import tqdm
from tqdm import trange
# Essential DS libraries
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import torch
# LightAutoML presets, task and report generation
from lightautoml.automl.presets.tabular_presets import TabularAutoML
from lightautoml.tasks import Task
import datetime
from collections import Counter
import category_encoders as ce
import re
import seaborn as sns
import matplotlib.pyplot as plt
N_THREADS = 4
N_FOLDS = 5
RANDOM_STATE = 42
TEST_SIZE = 0.25
TIMEOUT = 900 # equal to 15 minutes
TARGET_NAME = "reviewer_score"
np.random.seed(RANDOM_STATE)
torch.set_num_threads(N_THREADS)
# load the data
hotels_test = pd.read_csv("/kaggle/input/sf-booking/hotels_test.csv")
hotels_train = pd.read_csv("/kaggle/input/sf-booking/hotels_train.csv")
submission = pd.read_csv("/kaggle/input/sf-booking/submission.csv")
# Combine train and test into a single dataset
print("combining", len(hotels_train))
hotels_train["sample"] = 1  # mark the train rows
hotels_test["sample"] = 0  # mark the test rows
hotels_test[
    "reviewer_score"
] = 0  # the test set has no reviewer_score (that is what we must predict), so just fill it with zeros for now
hotels = pd.concat([hotels_train, hotels_test], sort=False).reset_index(
    drop=True
)  # concatenate train and test
print("после", len(hotels))
# добавление признаков
print(
"Старт",
f"{datetime.datetime.now().hour}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
count = 0
with trange(15) as t:
for i in t:
        # (1) flag duplicates (dup)
        # do not drop these rows; mark them with a separate "incorrect" feature
        if count == 0:
            t.set_description("dupl incorrect (%i)" % i)
            t.set_postfix(str="flagging duplicates as incorrect")
hotels_dd = hotels_train[hotels_train.duplicated()]
for ind in hotels_dd.index:
hotels.loc[ind, "incorrect"] = 1
count = 1
continue
        # (2) flag suspicious/invalid rows (inc)
        # do not drop these rows; mark them with a separate "incorrect" feature
        elif count == 1:
            t.set_description("strn incorrect (%i)" % i)
            t.set_postfix(str="flagging suspicious rows as incorrect")
list_hotel = hotels["hotel_name"].value_counts().index
for name in list_hotel:
dds = hotels[hotels["hotel_name"] == name]
country_top = (
hotels[hotels["hotel_name"] == name]["reviewer_nationality"]
.value_counts()
.head(1)
.index
)
for ind in dds.index:
if dds.loc[ind, "reviewer_nationality"] != country_top[0]:
hotels.loc[ind, "incorrect"] = 1
hotels["incorrect"] = hotels["incorrect"].fillna(0)
count = 2
continue
        # (3) flag score outliers as suspicious/invalid rows (inc)
        # do not drop these rows; mark them with a separate "incorrect" feature
        elif count == 2:
            t.set_description("score_incorrect (%i)" % i)
            t.set_postfix(str="flagging outliers as incorrect")
score_incorrect = [
3.1,
4.4,
5.6,
6.9,
3.0,
8.1,
9.4,
3.5,
4.0,
4.5,
5.5,
6.0,
6.5,
7.0,
8.5,
8.0,
9.0,
9.5,
]
ggg = []
for score in score_incorrect:
ind_score = hotels[hotels["reviewer_score"] == score].index
ggg.append(len(ind_score))
for ind in ind_score:
hotels.loc[ind, "incorrect"] = 1
count = 3
continue
        # (4) convert the negative review into negative_false (nf3)
        # the most incorrect/uninformative reviews were selected and saved to negative_false_v3.xlsx,
        # and the negative_false feature was then created from them
        elif count == 3:
            t.set_description("negative_false (%i)" % i)
            t.set_postfix(str="creating the negative_false feature")
negative_false = pd.read_excel(
"/kaggle/input/for-submission/negative_false_v3.xlsx"
)
list_neg = list(negative_false["negative_review"])
def get_negative_false(args):
if args in list_neg:
return 1
else:
return 0
hotels["negative_false"] = hotels["negative_review"].apply(
get_negative_false
)
count = 4
continue
        # (5) convert the positive review into positive_false (pf3)
        # the most incorrect/uninformative reviews were selected and saved to positive_false_v3.xlsx,
        # and the positive_false feature was then created from them
        elif count == 4:
            t.set_description("positive_false (%i)" % i)
            t.set_postfix(str="creating the positive_false feature")
positive_false = pd.read_excel(
"/kaggle/input/for-submission/positive_false_v3.xlsx"
)
list_pos = list(positive_false["positive_review"])
def get_positive_false(args):
if args in list_pos:
return 1
else:
return 0
hotels["positive_false"] = hotels["positive_review"].apply(
get_positive_false
)
count = 5
continue
        # (6) compute total_average_score (ta)
        # total_average_score is the hotel's mean score over the whole period of the data
        elif count == 5:
            t.set_description("total_average (%i)" % i)
            t.set_postfix(str="creating the total_average_score feature")
name_list = hotels["hotel_name"].unique()
for name in name_list:
AVR_name = round(
hotels[hotels["hotel_name"] == name]["reviewer_score"].mean(), 1
)
index_list = hotels[hotels["hotel_name"] == name].index
for ind in index_list:
hotels.loc[ind, "total_average_score"] = AVR_name
count = 6
continue
        # (7) create the weekday feature (we)
        elif count == 6:
            t.set_description("weekday (%i)" % i)
            t.set_postfix(str="creating the weekday feature")
def get_weekday(args):
gg = datetime.datetime.strptime(str(args), "%m/%d/%Y")
return gg.weekday()
hotels["weekday"] = hotels["review_date"].apply(get_weekday)
count = 7
continue
        # (8) update coordinates (up cor)
        # coordinates were looked up by hotel name with the geopy library, saved to deff_loc.csv and used to update the dataset
        elif count == 7:
            t.set_description("update cordina (%i)" % i)
            t.set_postfix(str="updating coordinates")
deff_loc = pd.read_csv("/kaggle/input/for-submission/deff_loc.csv")
deff_loc = deff_loc.drop(["lat_new_lng_", "lat", "lng"], axis=1)
deff_loc = deff_loc.rename(
columns={"Unnamed: 0": "hotel_name", "lat_new": "lat", "lng_new": "lng"}
)
for name in deff_loc["hotel_name"]:
list_index = hotels[hotels["hotel_name"] == name].index
for i in list_index:
hotels.loc[i, "lat"] = deff_loc[deff_loc["hotel_name"] == name][
"lat"
].values
count = 8
continue
        # (9) extract an "invalid" flag from the word counts (reb inc)
        # it turns out that a 0 in review_total_positive/negative_word_counts effectively cancels the positive or negative review,
        # so we create the new rebild_incorrect_negative/positive features
        elif count == 8:
            t.set_description("rebild_incorr (%i)" % i)
            t.set_postfix(str="creating the rebild_incorrect feature")
for i in range(len(hotels)):
if hotels.loc[i, "review_total_negative_word_counts"] == 0:
hotels.loc[i, "rebild_incorrect_negative"] = 1
if hotels.loc[i, "review_total_positive_word_counts"] == 0:
hotels.loc[i, "rebild_incorrect_positive"] = 1
hotels["rebild_incorrect_negative"] = hotels[
"rebild_incorrect_negative"
].fillna(0)
hotels["rebild_incorrect_positive"] = hotels[
"rebild_incorrect_positive"
].fillna(0)
count = 9
continue
        # (10) high scores in the first days of the week (rec_we)
        # My hypothesis about reviews being stronger early in the week for negatives and late in the working week for positives
        # ended up as a plain filtering of outliers
        elif count == 9:
            t.set_description("recall_weight (%i)" % i)
            t.set_postfix(str="creating the recall_weight feature")
mask1 = hotels["total_average_score"] > 4.7
mask2 = hotels["total_average_score"] < 7.5
ind_p = hotels[mask1 & mask2].index
for i in ind_p:
hotels.loc[i, "recall_weight"] = 1
hotels["recall_weight"] = hotels["recall_weight"].fillna(0)
count = 10
continue
        # (11) create the month feature (mo)
        elif count == 10:
            t.set_description("month (%i)" % i)
            t.set_postfix(str="creating the month feature")
def get_month(args):
gg = datetime.datetime.strptime(str(args), "%m/%d/%Y")
return gg.month
hotels["month"] = hotels["review_date"].apply(get_month)
count = 11
continue
        # (12) create len_tags (len_t)
        # the number of tags attached to the review
        elif count == 11:
            t.set_description("len_tags (%i)" % i)
            t.set_postfix(str="creating the len_tags feature")
def get_len_tags(args):
return len(args.split(", "))
hotels["len_tags"] = hotels["tags"].apply(get_len_tags)
count = 12
continue
        # (13) manual completion of coordinates
        # update coordinates that could not be fetched automatically
        elif count == 12:
            t.set_description("FIX_1_update corr (%i)" % i)
            t.set_postfix(str="filling in the missing coordinates")
            # restore the missing coordinates by hand
ll = hotels[
hotels["hotel_name"] == "Fleming s Selection Hotel Wien City"
].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.2094054
hotels.loc[ind, "lng"] = 16.3512565
ll = hotels[hotels["hotel_name"] == "Hotel City Central"].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.2135906
hotels.loc[ind, "lng"] = 16.3777195
ll = hotels[hotels["hotel_name"] == "Hotel Atlanta"].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.2234239
hotels.loc[ind, "lng"] = 16.3516895
ll = hotels[
hotels["hotel_name"] == "Maison Albar Hotel Paris Op ra Diamond"
].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.8753208
hotels.loc[ind, "lng"] = 2.3212033
ll = hotels[hotels["hotel_name"] == "Hotel Daniel Vienna"].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.1888741
hotels.loc[ind, "lng"] = 16.383408
ll = hotels[
hotels["hotel_name"] == "Hotel Pension Baron am Schottentor"
].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.2168032
hotels.loc[ind, "lng"] = 16.357717
ll = hotels[
hotels["hotel_name"]
== "Austria Trend Hotel Schloss Wilhelminenberg Wien"
].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.219573
hotels.loc[ind, "lng"] = 16.2834034
ll = hotels[hotels["hotel_name"] == "NH Collection Barcelona Podium"].index
for ind in ll:
hotels.loc[ind, "lat"] = 41.391552
hotels.loc[ind, "lng"] = 2.1757053
ll = hotels[
hotels["hotel_name"] == "Derag Livinghotel Kaiser Franz Joseph Vienna"
].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.2458909
hotels.loc[ind, "lng"] = 16.3397395
ll = hotels[hotels["hotel_name"] == "City Hotel Deutschmeister"].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.2208555
hotels.loc[ind, "lng"] = 16.3644228
ll = hotels[hotels["hotel_name"] == "Holiday Inn Paris Montmartre"].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.8889127
hotels.loc[ind, "lng"] = 2.3309643
ll = hotels[hotels["hotel_name"] == "Hotel Park Villa"].index
for ind in ll:
hotels.loc[ind, "lat"] = 54.7314468
hotels.loc[ind, "lng"] = 25.2987564
ll = hotels[hotels["hotel_name"] == "Cordial Theaterhotel Wien"].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.2095525
hotels.loc[ind, "lng"] = 16.3492746
ll = hotels[hotels["hotel_name"] == "Roomz Vienna"].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.2096079
hotels.loc[ind, "lng"] = 16.316444
ll = hotels[hotels["hotel_name"] == "Mercure Paris Gare Montparnasse"].index
for ind in ll:
hotels.loc[ind, "lat"] = 48.8399957
hotels.loc[ind, "lng"] = 2.3213731
ll = hotels[hotels["hotel_name"] == "Hotel Advance"].index
for ind in ll:
hotels.loc[ind, "lat"] = 41.3832385
hotels.loc[ind, "lng"] = 2.1629496
ll = hotels[hotels["hotel_name"] == "Renaissance Barcelona Hotel"].index
for ind in ll:
hotels.loc[ind, "lat"] = 41.3749664
hotels.loc[ind, "lng"] = 2.1277449
count = 13
continue
# (14) data type fix
# cast some columns from float to int
elif count == 13:
t.set_description("FIX_1_type (%i)" % i)
t.set_postfix(str="измененние типа данных")
hotels["rebild_incorrect_positive"] = hotels[
"rebild_incorrect_positive"
].astype(int)
hotels["rebild_incorrect_negative"] = hotels[
"rebild_incorrect_negative"
].astype(int)
hotels["recall_weight"] = hotels["recall_weight"].astype(int)
hotels["incorrect"] = hotels["incorrect"].astype(int)
count = 14
continue
# (15) final, closing step
elif count == 14:
t.set_description("завершено (%i)" % i)
t.set_postfix(str="завершено")
count = 15
continue
print(
"Finished",
f"{datetime.datetime.now().hour}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
hotels.to_csv("hotels_clean_3.csv")
print("hotels_clean_3 saved")
hotels = pd.read_csv("/kaggle/input/for-submission/hotels_clean_3.csv")
hotels = hotels.drop(["Unnamed: 0"], axis=1)
hotels.head(2)
# creating negative_review_weight - takes about 6 hours
# clean the feature: strip spaces from both ends
print(
"Start",
f"{datetime.datetime.now().hour}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
def get_strip(args):
return args.strip(" ")
hotels["negative_review_v2"] = hotels["negative_review"].apply(get_strip)
# frequency table of negative_review_v2 in the train part
ddd = hotels[hotels["sample"] == 1]["negative_review_v2"].value_counts()
ddd = ddd.reset_index()
ddd = ddd.rename(columns={"negative_review_v2": "count"})
ddd = ddd.rename(columns={"index": "negative_review_v2"})
ddd["normalize"] = round(ddd["count"] / ddd["count"].sum(), 6)
# build the negative_review_v2 weight
# using the frequency table, the weight is each review text's share of the total count
def get_review_weight(args):
return ddd[ddd["negative_review_v2"] == args]["normalize"].sum()
hotels["negative_review_weight"] = hotels["negative_review_v2"].apply(get_review_weight)
hotels.to_csv("hotels_negative_review_weight.csv")
print("сохранен hotels_negative_review_weight_v")
print(
"Завершено",
f"{datetime.datetime.now().hour}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
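# Aside (a minimal alternative sketch, not part of the original pipeline): the same weights can be
# computed in seconds by normalizing value_counts() once and mapping it onto the column, instead of
# re-filtering the frequency table for every row with .apply(). The column name
# "negative_review_weight_fast" is hypothetical and used only for this illustration.
_weights = (
    hotels[hotels["sample"] == 1]["negative_review_v2"]
    .value_counts(normalize=True)
    .round(6)
)
hotels["negative_review_weight_fast"] = (
    hotels["negative_review_v2"].map(_weights).fillna(0)
)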
hotels = pd.read_csv("/kaggle/input/for-submission/hotels_negative_review_weight.csv")
hotels = hotels.drop(["Unnamed: 0"], axis=1)
hotels.head(2)
# creating positive_review_weight - takes about 6 hours
print(
"Start",
f"{datetime.datetime.now().hour+5}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
def get_strip(args):
return args.strip(" ")
hotels["positive_review_v2"] = hotels["positive_review"].apply(get_strip)
# frequency table of positive_review_v2 in the train part
ddd = hotels[hotels["sample"] == 1]["positive_review_v2"].value_counts()
ddd = ddd.reset_index()
ddd = ddd.rename(columns={"positive_review_v2": "count"})
ddd = ddd.rename(columns={"index": "positive_review_v2"})
ddd["normalize"] = round(ddd["count"] / ddd["count"].sum(), 6)
# build the positive_review_v2 weight
# using the frequency table, the weight is each review text's share of the total count
def get_review_weight(args):
return ddd[ddd["positive_review_v2"] == args]["normalize"].sum()
hotels["positive_review_weight"] = hotels["positive_review_v2"].apply(get_review_weight)
hotels.to_csv("hotels_positive_review_weight.csv")
print("сохранен hotels_positive_review_weight")
print(
"Завершено",
f"{datetime.datetime.now().hour+5}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
hotels = pd.read_csv("/kaggle/input/for-submission/hotels_positive_review_weight_2.csv")
hotels = hotels.drop(["Unnamed: 0"], axis=1)
hotels.head(2)
# check for missing values
gg = hotels.isnull()
ind = gg.columns
for col in ind:
print(col, "||", gg[col].value_counts())
# fix missing values in negative_review_v2
gg = hotels.isnull()
ind = gg[gg["negative_review_v2"] == True].index
for i in ind:
if hotels.loc[i, "negative_review"] == " NA":
hotels.loc[i, "negative_review_v2"] = "NA"
if hotels.loc[i, "negative_review"] == " ":
hotels.loc[i, "negative_review_v2"] = "NA"
if hotels.loc[i, "negative_review"] == " NA ":
hotels.loc[i, "negative_review_v2"] = "NA"
# fix missing values in positive_review_v2
gg = hotels.isnull()
ind = gg[gg["positive_review_v2"] == True].index
for i in ind:
hotels.loc[i, "positive_review_v2"] = "NA"
# preparation for testing
data = hotels.copy()
# data = data.drop(['additional_number_of_scoring', 'rebild_incorrect_negative', 'rebild_incorrect_positive'], axis=1)
for col in data.columns:
    if col == "index":
        data = data.drop(columns=col)
        print("dropped index")
    if col == "Unnamed: 0":
        data = data.drop(columns=col)
        print("dropped Unnamed: 0")
    if col == 0:
        data = data.drop(columns=col)
        print("dropped 0")
print(data.columns)
# Now split off the test part
dd = data[data["sample"] == 1]
hotels_train = dd.drop(["sample"], axis=1)
vv = data[data["sample"] == 0]
hotels_test = vv.drop(["sample"], axis=1)
print(
"Finished",
f"{datetime.datetime.now().hour+5}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
# LAMA
train_data = hotels_train.copy()
# train_data = train_data.drop(['review_date'], axis=1)
test_data = hotels_test.copy()
def create_expert_feats(data):
pass
create_expert_feats(train_data)
create_expert_feats(test_data)
tr_data, te_data = train_test_split(
train_data, test_size=TEST_SIZE, random_state=RANDOM_STATE
)
print(
f"Data splitted. Parts sizes: tr_data = {tr_data.shape}, te_data = {te_data.shape}"
)
tr_data.head()
task = Task("reg", loss="mae", metric="mae")
roles = {"target": TARGET_NAME, "drop": ["index", "Unnamed: 0"]}
automl = TabularAutoML(
task=task,
timeout=TIMEOUT,
cpu_limit=N_THREADS,
reader_params={"n_jobs": N_THREADS, "cv": N_FOLDS, "random_state": RANDOM_STATE},
)
print(
"Finished",
f"{datetime.datetime.now().hour+5}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
oof_pred = automl.fit_predict(tr_data, roles=roles, verbose=1)
print(
"Finished",
f"{datetime.datetime.now().hour+5}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
print(automl.create_model_str_desc())
print(
"Finished",
f"{datetime.datetime.now().hour+5}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
te_pred = automl.predict(te_data)
print(f"Prediction for te_data:\n{te_pred}\nShape = {te_pred.shape}")
print(
"Finished",
f"{datetime.datetime.now().hour+5}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
# list of scores from previous runs, kept for comparison
result = [
0.15206,
0.42437,
0.14136,
0.14399,
0.14187,
0.14192,
0.14181,
0.14207,
0.14462,
0.14604,
0.1466,
]
mae_train = round(
mean_absolute_error(tr_data[TARGET_NAME].values, oof_pred.data[:, 0]), 5
)
# note: the "MAPE" variables below are simply 1 - MAE used as a convenience score, not a true percentage error
MAPE_train = round(1 - mae_train, 5)
mae_HOLDOUT = round(
mean_absolute_error(te_data[TARGET_NAME].values, te_pred.data[:, 0]), 5
)
MAPE_HOLDOUT = round(1 - mae_HOLDOUT, 5)
print(f"TRAIN out-of-fold score: {mae_train} MAPE {MAPE_train}")
print(f"HOLDOUT score: {mae_HOLDOUT} MAPE {MAPE_HOLDOUT}")
result.append(MAPE_HOLDOUT)
result
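# Aside (assuming scikit-learn >= 0.24 is available): if a real mean absolute percentage error is
# wanted rather than the 1 - MAE score above, it could be computed directly on the holdout:
from sklearn.metrics import mean_absolute_percentage_error
holdout_mape = mean_absolute_percentage_error(te_data[TARGET_NAME].values, te_pred.data[:, 0])
print(f"HOLDOUT true MAPE: {holdout_mape:.5f}")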
# Fast feature importances calculation
fast_fi = automl.get_feature_scores("fast")
fast_fi.set_index("Feature")["Importance"].plot.bar(figsize=(30, 10), grid=True)
# Accurate feature importances calculation (Permutation importances) - can take long time to calculate on bigger datasets
print(
"Start",
f"{datetime.datetime.now().hour+5}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
accurate_fi = automl.get_feature_scores("accurate", te_data, silent=False)
accurate_fi.set_index("Feature")["Importance"].plot.bar(figsize=(30, 10), grid=True)
print(
"Finished",
f"{datetime.datetime.now().hour+5}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
test_pred = automl.predict(test_data)
print(f"Prediction for te_data:\n{test_pred}\nShape = {test_pred.shape}")
submission = pd.read_csv("/kaggle/input/sf-booking/submission.csv")
submission[TARGET_NAME] = test_pred.data[:, 0]
submission.to_csv("submission.csv", index=False)
print(
"Finished",
f"{datetime.datetime.now().hour+5}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}",
)
# check for multicollinearity
# in practice, collinearity did not affect the final score, while dropping features lowered it
import seaborn as sns
import matplotlib.pyplot as plt
# hotels = hotels_train.drop('average_score', axis=1)
# hotels_train = hotels_train.drop(['additional_number_of_scoring', 'average_score'], axis=1)
fig, axis = plt.subplots(figsize=(14, 8))  # create the figure and set its size
cont = np.eye((2))
sns.heatmap(
hotels_train.corr(), annot=True, fmt=".2g"
)  # ax=axis would bind the heatmap to the figure axes created above
fig
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # ABOUT DATA PROBLEM ::
# Credit scoring is a widely used risk management technique in the financial sector. It makes use of personal information and data provided by credit card applicants to estimate future bankruptcies and credit card loans. The bank has the authority to determine whether or not to provide the applicant a credit card. Credit scores can estimate the level of risk objectively.
# A machine learning model is required to predict if an application is a 'good' or 'bad' user in this dataset. However, there is no definition of 'good' or 'bad'. Additionally, the unbalanced data problem is a significant issue in this project.
# Two datasets are used in this notebook:
# **Application record (contains general information about applicant, such as applicant gender, DOB, education type, assets that applicant had, etc.)**
# **Credit record (contains applicant's loan payment records)**
# # Reading & Analyzing Data
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
df_application = pd.read_csv(
"/kaggle/input/credit-card-approval-prediction/application_record.csv"
)
df_credit = pd.read_csv(
"/kaggle/input/credit-card-approval-prediction/credit_record.csv"
)
print("Shape of application data", df_application.shape)
print("-------------------------------------------")
print("Shape of credit data", df_credit.shape)
print("Columns of application data", df_application.columns)
print("-------------------------------------------")
print("Columns of credit data", df_credit.columns)
print("data type of application data", df_application.dtypes)
print("-------------------------------------------")
print("data type of credit data", df_credit.dtypes)
print("columns in application data")
print(
"--------------------------------------------------------------------------------"
)
cat_app_data = [i for i in df_application.select_dtypes(include=object).columns]
num_app_data = [i for i in df_application.select_dtypes(include=np.number).columns]
print("categorical columns in application data", cat_app_data)
print(
"--------------------------------------------------------------------------------"
)
print("numerical columns in application data", num_app_data)
print("columns in credit data")
print(
"--------------------------------------------------------------------------------"
)
cat_credit_data = [i for i in df_credit.select_dtypes(include=object).columns]
num_credit_data = [i for i in df_credit.select_dtypes(include=np.number).columns]
print("categorical columns in credit data", cat_credit_data)
print(
"--------------------------------------------------------------------------------"
)
print("numerical columns in credit data", num_credit_data)
df_application.sample(5)
df_credit.sample(5)
df_final = pd.merge(df_application, df_credit, on="ID", how="inner")
df_final.shape
df_final.columns
df_final.describe().T
# The data does not include a credit card open date. Based on MONTHS_BALANCE, we find the start month for each applicant and then rearrange the data so that the status is available for month 0 (the start month), month 1 (one month after the start), and so on.
# We assume the earliest (most negative) MONTHS_BALANCE is the start month for an account, and reshape the data accordingly.
# Earliest Month
credit_card_first_month = (
df_final.groupby(["ID"]).agg(start_month=("MONTHS_BALANCE", min)).reset_index()
)
credit_card_first_month.head()
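# A toy illustration (hypothetical IDs, not from the data) of the start-month logic above:
# the smallest (most negative) MONTHS_BALANCE per ID marks the earliest observed month.
toy = pd.DataFrame({"ID": [1, 1, 1, 2, 2], "MONTHS_BALANCE": [0, -1, -2, 0, -5]})
print(toy.groupby("ID").agg(start_month=("MONTHS_BALANCE", "min")).reset_index())
# ID 1 -> start_month -2, ID 2 -> start_month -5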
# We assume the data were extracted as of 1-Jan-2020 and derive the calendar start month for each account; a calendar account-open date can be useful for some analyses.
import datetime
credit_card_first_month["account_open_month"] = datetime.datetime.strptime(
"2020-01-01", "%Y-%m-%d"
)
credit_card_first_month["account_open_month"] = credit_card_first_month[
"account_open_month"
] + credit_card_first_month["start_month"].values.astype("timedelta64[M]")
credit_card_first_month["account_open_month"] = credit_card_first_month[
"account_open_month"
].dt.strftime("%b-%Y")
credit_card_first_month.head()
# Account 5008804 was opened in Oct-2018 and account 5008805 in Nov-2018. Next we add the start_month column to the credit status table.
# join the table
credit_start_status = pd.merge(
credit_card_first_month, df_credit, how="left", on=["ID"]
)
credit_start_status["start_month"] = (
abs(credit_start_status["start_month"]) + credit_start_status["MONTHS_BALANCE"]
)
credit_start_status.head()
# start_month now measures months elapsed since the account was opened; for example, an account whose earliest MONTHS_BALANCE is -18 gets start_month = 2 for its record at MONTHS_BALANCE = -16, i.e. two months after opening.
# Across all acquisition months we can now measure portfolio performance at 1, 2, 5, 15 or 20 months from the respective account open month. We compute the distribution of accounts by status for each month, starting with counts of accounts by month and status code.
credit_start_status["STATUS"].value_counts()
accounts_counts = pd.DataFrame(
{"start_month": credit_start_status.groupby("start_month")["start_month"].count()}
)
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.bar(accounts_counts.index, accounts_counts["start_month"])
plt.show()
# We want to calculate % Bad Rate for the overall portfolio - across all the account open months. This will help us find the period by which the overall bad rate is stable.
# Another important observation is that only a small volume of credit card accounts was opened in the early months. These accounts may not be relevant to consider for the modeling. We can check the Bad Rate distribution for these accounts.
month_status_counts = (
credit_start_status.groupby(["start_month", "STATUS"])
.size()
.reset_index(name="counts")
)
month_counts = (
credit_start_status.groupby(["start_month"]).size().reset_index(name="month_counts")
)
# join the table
month_status_pct = pd.merge(
month_status_counts, month_counts, how="left", on=["start_month"]
)
month_status_pct["status_pct"] = (
month_status_pct["counts"] / month_status_pct["month_counts"] * 100
)
month_status_pct = month_status_pct.loc[:, ["start_month", "STATUS", "status_pct"]]
# Restructure
month_status_pct1 = month_status_pct.pivot(
index="start_month", columns="STATUS", values="status_pct"
)
# Fill with 0
month_status_pct1 = month_status_pct1.fillna(0).reset_index()
import matplotlib.pyplot as pt
pt.plot(
month_status_pct1.index,
month_status_pct1["4"] + month_status_pct1["5"],
color="green",
linestyle="solid",
linewidth=2,
markersize=12,
)
pt.xlabel("Months Since Opened")
pt.ylabel("% Bad Rate")
# The Bad Rate jumps significantly for accounts that have been open for over 50 months. These are accounts opened in the initial days of operations, so it is reasonable to exclude them.
month_status_pct2 = month_status_pct1.loc[month_status_pct1.index <= 50]
# drop column start_month
month_status_pct2 = month_status_pct2.drop("start_month", axis=1)
import matplotlib.pyplot as plot
month_status_pct2.plot.area(stacked=True)
plot.show(block=True)
import matplotlib.pyplot as pt
pt.plot(
month_status_pct2.index,
month_status_pct2["4"] + month_status_pct2["5"],
color="green",
linestyle="solid",
linewidth=2,
markersize=12,
)
# The bad rate settles roughly 18 months from the start, so we take 18 months as the performance window: accounts that become bad within the first 18 months are labelled Bad, the rest Good.
# Bad rate % may differ by acquisition month, but we do not explore that further. An account with status 4 or 5 within the first 18 months is labelled Bad, otherwise Good.
# We select start months up to 18 (so only the first 18 months are considered) and find the maximum status for each credit card account; if it is 4 or 5 the account is Bad, otherwise Good.
df_credit["STATUS"].value_counts()
credit_start_status.groupby("STATUS")["STATUS"].count()
# filter on credit_start_status's own STATUS column (a mask built from df_credit would not align after the merge)
credit_start_status1 = credit_start_status.loc[
    (credit_start_status["STATUS"] != "X") & (credit_start_status["STATUS"] != "C"), :
]
credit_start_status1["status"] = credit_start_status1["STATUS"]
credit_start_status1 = credit_start_status1.loc[
credit_start_status1["start_month"] <= 18, ["ID", "start_month", "status"]
]
credit_start_status1 = credit_start_status1[(credit_start_status1["status"] != "C")]
credit_start_status1 = credit_start_status1[(credit_start_status1["status"] != "X")]
credit_start_status1
# Find Max Status Values
status = (
credit_start_status1.groupby(["ID"])
.agg(
# Max Status
max_status=("status", "max")
)
.reset_index()
)
# Validate
status.groupby("max_status")["max_status"].count()
import numpy as np
# Define
status["label"] = np.where(status["max_status"].astype(int) >= int(4), 1, 0)
# Validate
status.groupby("label")["label"].count()
# The data is highly unbalanced, with a bad rate of about 0.47%. We create a biased sample: we keep all label-1 observations but only a small share of label-0 observations, so that the bad rate rises to roughly 10%.
#
status.groupby("label")["label"].count() * 100 / len(status["label"])
# With 189 label-1 observations, a 10% bad rate means 1701 label-0 observations in the final sample.
# So we now randomly select 1701 observations from the 39562 label-0 rows.
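# Sketch of the sample-size arithmetic (keeping all label-1 rows and targeting a ~10% bad rate):
# n0 = n1 * (1 - p) / p label-0 rows are needed for n1 label-1 rows.
n1 = int((status["label"] == 1).sum())
target_bad_rate = 0.10
n0 = int(n1 * (1 - target_bad_rate) / target_bad_rate)
print(n1, n0)  # with n1 = 189 this gives n0 = 1701, the sample size used below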
# All with label 1
label_1 = status.loc[status["label"] == 1, :]
# All with label 0
label_0 = status.loc[status["label"] == 0, :]
# Select randomly few rows
label_0_biased = label_0.sample(n=1701)
# Combined Sample IDs with Biased Sampling
frames = [label_1, label_0_biased]
import pandas as pd
labels_biased = pd.concat(frames)
# Keep only ID and Label Columns
labels_biased = labels_biased.loc[:, ["ID", "label"]]
labels_biased
# Combine Labels and Application Data
model_df = pd.merge(labels_biased, df_application, how="inner", on=["ID"])
len(model_df)
model_df.tail()
model_df.groupby("label")["label"].count() * 100 / len(model_df["label"])
# Check if missing values
def missing_values_table(df):
mis_val = df.isnull().sum()
mis_val_percent = 100 * df.isnull().sum() / len(df)
mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
mis_val_table_ren_columns = mis_val_table.rename(
columns={0: "Missing Values", 1: "% of Total Values"}
)
mis_val_table_ren_columns = (
mis_val_table_ren_columns[mis_val_table_ren_columns.iloc[:, 1] != 0]
.sort_values("% of Total Values", ascending=False)
.round(1)
)
print(
"Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
"There are "
+ str(mis_val_table_ren_columns.shape[0])
+ " columns that have missing values."
)
return mis_val_table_ren_columns
# source: https://stackoverflow.com/questions/26266362/how-to-count-the-nan-values-in-a-column-in-pandas-dataframe
missing_values_table(model_df)
# Find Continuous and Categorical Features
def featureType(df):
import numpy as np
from pandas.api.types import is_numeric_dtype
columns = df.columns
rows = len(df)
colTypeBase = []
colType = []
for col in columns:
try:
try:
uniq = len(np.unique(df[col]))
except:
uniq = len(df.groupby(col)[col].count())
if rows > 10:
if is_numeric_dtype(df[col]):
if uniq == 1:
colType.append("Unary")
colTypeBase.append("Unary")
elif uniq == 2:
colType.append("Binary")
colTypeBase.append("Binary")
elif rows / uniq > 3 and uniq > 5:
colType.append("Continuous")
colTypeBase.append("Continuous")
else:
colType.append("Continuous-Ordinal")
colTypeBase.append("Ordinal")
else:
if uniq == 1:
colType.append("Unary")
colTypeBase.append("Category-Unary")
elif uniq == 2:
colType.append("Binary")
colTypeBase.append("Category-Binary")
else:
colType.append("Categorical-Nominal")
colTypeBase.append("Nominal")
else:
if is_numeric_dtype(df[col]):
colType.append("Numeric")
colTypeBase.append("Numeric")
else:
colType.append("Non-numeric")
colTypeBase.append("Non-numeric")
except:
colType.append("Issue")
# Create dataframe
df_out = pd.DataFrame(
{
"Feature": columns,
"BaseFeatureType": colTypeBase,
"AnalysisFeatureType": colType,
}
)
return df_out
featureType(model_df)
from datetime import timedelta
model_df["BIRTH_DATE"] = datetime.datetime.strptime(
"2020-01-01", "%Y-%m-%d"
) + model_df["DAYS_BIRTH"].apply(pd.offsets.Day)
# DAYS_EMPLOYED: counted backwards from the current day (0). If positive, the person is currently unemployed.
# Update DAYS_EMPLOYED greater than 0 to 31
model_df.loc[model_df.DAYS_EMPLOYED > 0, "DAYS_EMPLOYED"] = 31
model_df["EMPLOYMENT_START_DATE"] = datetime.datetime.strptime(
"2020-01-01", "%Y-%m-%d"
) + model_df["DAYS_EMPLOYED"].apply(pd.offsets.Day)
model_df.head()
model_df = pd.merge(
model_df,
credit_card_first_month.loc[:, ["ID", "account_open_month"]],
how="inner",
on=["ID"],
)
len(model_df)
# Age in months
model_df["age_months"] = (
pd.to_datetime(model_df["account_open_month"], format="%b-%Y") - model_df.BIRTH_DATE
) / np.timedelta64(1, "M")
model_df["age_months"] = model_df["age_months"].astype(int)
# Experience/Employment in Months
model_df["employment_months"] = (
pd.to_datetime(model_df["account_open_month"], format="%b-%Y")
- model_df.EMPLOYMENT_START_DATE
) / np.timedelta64(1, "M")
model_df["employment_months"] = model_df["employment_months"].astype(int)
model_df.loc[model_df.employment_months < 0, "employment_months"] = -1
model_df = model_df.drop(
[
"BIRTH_DATE",
"EMPLOYMENT_START_DATE",
"account_open_month",
"DAYS_BIRTH",
"DAYS_EMPLOYED",
"FLAG_MOBIL",
],
axis=1,
)
featureType(model_df)
import warnings
warnings.filterwarnings("ignore")
income_type = (
model_df.groupby(["NAME_INCOME_TYPE", "label"])
.size()
.reset_index(name="counts")
)
# Restructure
income_type = income_type.pivot(
index="NAME_INCOME_TYPE", columns="label", values="counts"
)
# Fill with 0
income_type = income_type.fillna(0).reset_index()
# Rename the columns
income_type.columns = ["Income_Type", "Label_0", "Label_1"]
# Calculate Bad Rate for each of the income type
income_type["pct_obs"] = (income_type["Label_0"] + income_type["Label_1"]) / (
sum(income_type["Label_0"]) + sum(income_type["Label_1"])
)
income_type["pct_label_0"] = income_type["Label_0"] / (
income_type["Label_0"] + income_type["Label_1"]
)
income_type["pct_label_1"] = income_type["Label_1"] / (
income_type["Label_0"] + income_type["Label_1"]
)
print(income_type)
# fill missing values for OCCUPATION_TYPE (they are NaN, not empty strings)
model_df["OCCUPATION_TYPE"] = model_df["OCCUPATION_TYPE"].fillna("NA")
# One hot Encoding using get_dummies function
model_df2 = pd.get_dummies(
model_df,
columns=[
"CODE_GENDER",
"FLAG_OWN_CAR",
"FLAG_OWN_REALTY",
"NAME_INCOME_TYPE",
"NAME_EDUCATION_TYPE",
"NAME_FAMILY_STATUS",
"NAME_HOUSING_TYPE",
"OCCUPATION_TYPE",
],
)
len(model_df2)
import xgboost as xgb
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np
model_df2.columns
# Features - exclude ID and Label columns
features = model_df2.iloc[:, 2:]
# Label - select only label column
label = model_df2.iloc[:, 1]
label
model_df2.sample(5)
model_df2.dtypes
from sklearn.model_selection import train_test_split
features_train, features_test, label_train, label_test = train_test_split(
features, label, test_size=0.2, random_state=557
)
from pycaret.regression import *
from pycaret.classification import *
reg_experiment = setup(
model_df2,
target="label",
session_id=42,
experiment_name="credit_card_approval",
normalize=True,
transformation=True,
remove_multicollinearity=True,  # drop one of any two features that are highly correlated with each other
multicollinearity_threshold=0.5,
)
best_model = compare_models()
rf = create_model("rf")
rf = tune_model(rf, optimize="F1")
plot_model(rf)
plot_model(rf, plot="feature")
print(evaluate_model(rf))
interpret_model(rf)
pred_holdouts = predict_model(rf)
pred_holdouts.head()
save_model(rf, model_name="./random_forest")  # the tuned model was assigned back to rf above
|
import sklearn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
data = pd.read_csv("weatherAUS_train.csv")
data_test = pd.read_csv("weatherAUS_test.csv")
data_test.head()
data.head()
data.shape
data.columns
data.describe()
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le_count = 0
for col in data:
if data[col].dtype == "object":
print(col, len(list(data[col].unique())))
data["RainToday"].value_counts()
data["RainToday"].unique()
data_test["RainToday"].unique()
data_test["RainToday"] = data_test["RainToday"].fillna(value="No")
data = data[data["RainToday"].isin(["No", "Yes"])]
# data_test = data_test[data_test['RainToday'].isin(['No', 'Yes'])]
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le_count = 0
le.fit(data["RainToday"])
data["RainToday"] = le.transform(data["RainToday"])
le = LabelEncoder()
le_count = 0
le.fit(data_test["RainToday"])
data_test["RainToday"] = le.transform(data_test["RainToday"])
data["RainToday"].unique()
data.head()
import datetime
temp1 = data["Date"]
temp1 = temp1.apply(lambda s: datetime.datetime.strptime(s, "%Y-%m-%d").date())
data["month"] = temp1.apply(lambda s: s.month)
data["year"] = temp1.apply(lambda s: s.year)
data.drop(["Date"], 1, inplace=True)
import datetime
temp2 = data_test["Date"]
temp2 = temp2.apply(lambda s: datetime.datetime.strptime(s, "%Y-%m-%d").date())
data_test["month"] = temp2.apply(lambda s: s.month)
data_test["year"] = temp2.apply(lambda s: s.year)
data_test.drop(["Date"], 1, inplace=True)
data.head()
data.drop(["WindGustDir", "WindDir9am", "WindDir3pm"], 1, inplace=True)
data_test.drop(["WindGustDir", "WindDir9am", "WindDir3pm"], 1, inplace=True)
data.head()
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le_count = 0
for col in data:
if data[col].dtype == "object":
print(col, len(list(data[col].unique())))
clmns = data.columns[data.isnull().any()]
missed = pd.DataFrame(
data[clmns].isnull().sum().sort_values(ascending=False) / data.shape[0],
columns=["% NULL"],
)
missed
corr = data.corr()
plt.figure(figsize=(25, 21))
sns.heatmap(corr, vmax=0.8, square=True, cmap="magma")
data.columns
# handle the strongly correlated features: drop the 9am columns in favour of the 3pm ones
data.drop(
["Pressure9am", "WindSpeed9am", "Temp9am", "Cloud9am", "Humidity9am"],
axis=1,
inplace=True,
)
data_test.drop(
["Pressure9am", "WindSpeed9am", "Temp9am", "Cloud9am", "Humidity9am"],
axis=1,
inplace=True,
)
def change_to_loc_month_mean(string):
    # build a (Location, month) -> train-set mean lookup for the given column and
    # attach it to both the train and test frames as a new LocMonth<column> feature
    loc_month_map = {}
    for location in data["Location"].unique():
        data_loc = data[data["Location"] == location]
        for month in range(1, 13):
            data_loc_month = data_loc[data_loc["month"] == month]
            loc_month_map[location + str(month)] = data_loc_month[string].mean()
    tmp = data["Location"] + data["month"].astype(str)
    tmp_test = data_test["Location"] + data_test["month"].astype(str)
    tmpstr = "LocMonth" + string
    data[tmpstr] = tmp.map(loc_month_map)
    data_test[tmpstr] = tmp_test.map(loc_month_map)
array = [
"MinTemp",
"MaxTemp",
"Rainfall",
"Evaporation",
"Sunshine",
"WindGustSpeed",
"WindSpeed3pm",
"Humidity3pm",
"Pressure3pm",
"Cloud3pm",
"Temp3pm",
"RainToday",
]
for string in array:
change_to_loc_month_mean(string)
data.head()
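# Optional sanity check (not in the original notebook): each LocMonth* column should equal the
# per-(Location, month) mean computed directly with groupby on the train data.
check = data.groupby(["Location", "month"])["MinTemp"].transform("mean")
print(np.allclose(check.fillna(0), data["LocMonthMinTemp"].fillna(0)))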
corr = data.corr()
plt.figure(figsize=(25, 21))
sns.heatmap(corr, vmax=0.8, square=True, cmap="magma")
clmns = data.columns[data.isnull().any()]
missed = pd.DataFrame(
data[clmns].isnull().sum().sort_values(ascending=False) / data.shape[0],
columns=["% NULL"],
)
missed
for col in [
    "Sunshine",
    "Evaporation",
    "Cloud3pm",
    "Pressure3pm",
    "WindGustSpeed",
    "Humidity3pm",
    "Temp3pm",
    "MaxTemp",
    "MinTemp",
]:
    col2 = "LocMonth" + col
    data[col] = data[col].fillna(data[col2])
    # fill the matching column in the test set (the original filled the whole frame by mistake)
    data_test[col] = data_test[col].fillna(data_test[col2])
clmns = data.columns[data.isnull().any()]
missed = pd.DataFrame(
data[clmns].isnull().sum().sort_values(ascending=False) / data.shape[0],
columns=["% NULL"],
)
missed
data = data.fillna(value=0.0)
data_test = data_test.fillna(value=0.0)
data.drop(["Location"], 1, inplace=True)
data_test.drop(["Location"], 1, inplace=True)
clmns = data.columns[data.isnull().any()]
missed = pd.DataFrame(
data[clmns].isnull().sum().sort_values(ascending=False) / data.shape[0],
columns=["% NULL"],
)
missed
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
X = data.loc[:, data.columns != "RainTomorrow"]
y = data.loc[:, data.columns == "RainTomorrow"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
model = LogisticRegression(penalty="l1", C=1.0)
model.fit(X_train, y_train)
for pen in ["l1", "l2"]:
for c in np.logspace(-3, 3, 7):
model = LogisticRegression(penalty=pen, C=c)
model.fit(X_train, y_train)
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
print(pen, c)
print("Test: ", metrics.f1_score(y_test, y_test_pred, average="weighted"))
print("Train: ", metrics.f1_score(y_train, y_train_pred, average="weighted"))
model = LogisticRegression()
model.fit(X_train, y_train)
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
metrics.f1_score(y_train, y_train_pred, average="weighted")
metrics.f1_score(y_test, y_test_pred, average="weighted")
from sklearn.metrics import classification_report
print("Train metrics")
print(classification_report(y_train, y_train_pred))
print("Test metrics")
print(classification_report(y_test, y_test_pred))
y_train_pred_proba = model.predict_proba(X_train)
y_test_pred_proba = model.predict_proba(X_test)
from sklearn import metrics
score = metrics.roc_auc_score(y_test["RainTomorrow"].values, y_test_pred_proba[:, 1])
print("Test: ", score)
score = metrics.roc_auc_score(y_train["RainTomorrow"].values, y_train_pred_proba[:, 1])
print("Train: ", score)
from sklearn.metrics import roc_curve, auc
fpr = dict()
tpr = dict()
roc_auc = dict()
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_test_pred_proba[:, 1])
roc_auc = auc(fpr, tpr)
plt.figure(figsize=(7, 7))
lw = 2
plt.plot(
fpr, tpr, color="darkorange", lw=lw, label="ROC curve (area = %0.2f)" % roc_auc
)
plt.plot([0, 1], [0, 1], color="navy", lw=lw, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver operating characteristic example")
plt.legend(loc="lower right")
plt.show()
answer = model.predict(data_test.loc[:, data_test.columns != "Id"])
data_test["RainTomorrow"] = answer
data_test.head()
answer = data_test[["Id", "RainTomorrow"]]
answer.shape
answer.to_csv("answer.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score
#!pip install catboost
from catboost import CatBoostClassifier
from sklearn.utils import class_weight
# Reading the Data Files
train = pd.read_csv("../input/jobathon-analytics-vidhya/train.csv")
print(train.shape)
train.head()
test = pd.read_csv("../input/jobathon-analytics-vidhya/test.csv")
print(test.shape)
test.head()
# **EDA**
sns.countplot(x="Response", data=train)
plt.show()
sns.countplot(x="Accomodation_Type", data=train, hue="Response")
plt.show()
sns.countplot(x="Health Indicator", data=train, hue="Response")
plt.show()
# **Filling missing values**
train.isnull().sum()
cols_with_missing = [col for col in train.columns if train[col].isnull().any()]
print("Columns with Missing values : ")
cols_with_missing
# Adding new columns to give the information about missing rows
for col in cols_with_missing:
train[col + "_was_missing"] = train[col].isnull()
train[col + "_was_missing"] = train[col + "_was_missing"].apply(
lambda x: 1 if x == True else 0
)
cols_with_missing = [col for col in test.columns if test[col].isnull().any()]
for col in cols_with_missing:
test[col + "_was_missing"] = test[col].isnull()
test[col + "_was_missing"] = test[col + "_was_missing"].apply(
lambda x: 1 if x == True else 0
)
# Filling missing values with a new category 'X'
train["Health Indicator"].fillna("X", inplace=True)
test["Health Indicator"].fillna("X", inplace=True)
train["Holding_Policy_Duration"].fillna("X", inplace=True)
test["Holding_Policy_Duration"].fillna("X", inplace=True)
train["Holding_Policy_Type"].fillna("X", inplace=True)
test["Holding_Policy_Type"].fillna("X", inplace=True)
train.isnull().sum().sum()
# **Feature Engineering**
# Treating the column as categorical (string)
train["Holding_Policy_Type"] = train["Holding_Policy_Type"].apply(lambda x: str(x))
test["Holding_Policy_Type"] = test["Holding_Policy_Type"].apply(lambda x: str(x))
# Treating the column as categorical (string)
train["Reco_Policy_Cat"] = train["Reco_Policy_Cat"].apply(lambda x: str(x))
test["Reco_Policy_Cat"] = test["Reco_Policy_Cat"].apply(lambda x: str(x))
# Getting a new feature age_diff
train["age_diff"] = train["Upper_Age"] - train["Lower_Age"]
test["age_diff"] = test["Upper_Age"] - test["Lower_Age"]
# Making copies of the original data
train_org = train.copy()
test_org = test.copy()
def response_coding(data, column, alpha):
"""This function performs reponse coding on the given column with laplace smoothing"""
responses = {}
train = train_org
unique_categories = list(train[column].unique())
for category in unique_categories:
prob_score = 0
if 1 in list(train[train[column] == category]["Response"].value_counts().index):
prob_score = (
train[train[column] == category]["Response"].value_counts()[1]
+ alpha * 1
) / (len(train[train[column] == category]["Response"]) + alpha * 2)
else:
prob_score = (alpha * 1) / (
len(train[train[column] == category]["Response"]) + alpha * 2
)
responses[category] = prob_score
data[column + "_response"] = data[column].apply(
lambda x: responses[x] if x in responses.keys() else 0
)
# Getting the response coded columns
response_coding_columns = [
"Region_Code",
"City_Code",
"Health Indicator",
"Holding_Policy_Duration",
"Reco_Policy_Cat",
"age_diff",
"Reco_Policy_Premium",
]
for col in response_coding_columns:
response_coding(train, col, 1)
response_coding(test, col, 1)
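# Worked illustration of the response coding above (a hypothetical mini-example, not real data):
# for a category with 3 positive responses out of 10 rows and alpha = 1, the encoded value is the
# Laplace-smoothed probability (3 + 1) / (10 + 2) = 0.333.
toy = pd.DataFrame({"City_Code": ["C1"] * 10, "Response": [1, 1, 1, 0, 0, 0, 0, 0, 0, 0]})
alpha = 1
smoothed = (toy["Response"].sum() + alpha) / (len(toy) + 2 * alpha)
print(round(smoothed, 3))  # 0.333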
# Performing One hot Encoding on Categorical columns
category_labels_dict = {}
category_columns = [
"Holding_Policy_Type",
"Reco_Policy_Cat",
"City_Code",
"Health Indicator",
]
for column in category_columns:
unq_categories = list(train[column].unique())
# dictionary=dict((value,index) for index,value in enumerate(unq_categories) )
category_labels_dict[column] = unq_categories
def get_dummies(data, column):
category_labels = category_labels_dict[column]
for category in category_labels:
data[column + "_" + category] = data[column].apply(
lambda x: 1 if x == category else 0
)
data.drop([column], axis=1, inplace=True)
for column in category_columns:
get_dummies(train, column)
get_dummies(test, column)
# Converting Holding_Policy_Duration into a numeric column
train["Holding_Policy_Duration"] = train["Holding_Policy_Duration"].replace("14+", "15")
test["Holding_Policy_Duration"] = test["Holding_Policy_Duration"].replace("14+", "15")
train["Holding_Policy_Duration"] = train["Holding_Policy_Duration"].apply(
lambda x: float(x) if x != "X" else 0
)
test["Holding_Policy_Duration"] = test["Holding_Policy_Duration"].apply(
lambda x: float(x) if x != "X" else 0
)
# Performing Label encoding on some columns
train["Accomodation_Type"] = train["Accomodation_Type"].apply(
lambda x: 1 if x == "Rented" else 0
)
test["Accomodation_Type"] = test["Accomodation_Type"].apply(
lambda x: 1 if x == "Rented" else 0
)
train["Reco_Insurance_Type"] = train["Reco_Insurance_Type"].apply(
lambda x: 1 if x == "Joint" else 0
)
test["Reco_Insurance_Type"] = test["Reco_Insurance_Type"].apply(
lambda x: 1 if x == "Joint" else 0
)
train["Is_Spouse"] = train["Is_Spouse"].apply(lambda x: 1 if x == "Yes" else 0)
test["Is_Spouse"] = test["Is_Spouse"].apply(lambda x: 1 if x == "Yes" else 0)
# Creating a new column is_joint_and_married
train["is_joint_and_married"] = train["Reco_Insurance_Type"] ^ train["Is_Spouse"]
test["is_joint_and_married"] = test["Reco_Insurance_Type"] ^ test["Is_Spouse"]
train.head()
test.head()
# **Training**
X_train = train.drop(["ID", "Response"], axis=1)
y_train = train[["Response"]]
X_test = test.drop(["ID"], axis=1)
# Creating train and validation (cv) split
X_train, X_cv, y_train, y_cv = train_test_split(
X_train, y_train, stratify=y_train, test_size=0.3, random_state=42
)
print("X_train : ", X_train.shape, " y_train : ", y_train.shape)
print("X_cv : ", X_cv.shape, " y_cv : ", y_cv.shape)
print("X_test : ", X_test.shape)
scalar = StandardScaler()
columns_std = [
"City_Code_response",
"Region_Code_response",
"Upper_Age",
"Lower_Age",
"Health Indicator_response",
"Holding_Policy_Duration",
"Region_Code",
"Holding_Policy_Duration_response",
"Reco_Policy_Premium",
"age_diff",
"Reco_Policy_Cat_response",
"age_diff_response",
"Reco_Policy_Premium_response",
]
scalar.fit(X_train[columns_std])
X_train[columns_std] = scalar.transform(X_train[columns_std])
X_cv[columns_std] = scalar.transform(X_cv[columns_std])
X_test[columns_std] = scalar.transform(X_test[columns_std])
weights = class_weight.compute_class_weight(
    "balanced", classes=np.unique(y_train["Response"]), y=y_train["Response"]
)
print("Using class weights while training")
list(weights)
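# Small sketch (assuming the binary 0/1 labels above) of what "balanced" class weights mean:
# weight_c = n_samples / (n_classes * count_c), so the minority class gets the larger weight.
_counts = y_train["Response"].value_counts().sort_index()
print(list(len(y_train) / (2 * _counts)))  # should match list(weights) above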
# Training catboost Classifier model
params = {
"loss_function": "Logloss",
"eval_metric": "AUC",
"verbose": 200,
"random_seed": 42,
"class_weights": list(weights),
}
cat_boost = CatBoostClassifier(
**params,
)
cat_boost.fit(X_train, y_train, eval_set=(X_cv, y_cv), use_best_model=True, plot=True)
print("Train AUC :", roc_auc_score(y_train, cat_boost.predict_proba(X_train)[:, 1]))
print("CV AUC :", roc_auc_score(y_cv, cat_boost.predict_proba(X_cv)[:, 1]))
feat_imp = [t for t in zip(X_train.columns, cat_boost.get_feature_importance())]
feat_imp_df = pd.DataFrame(feat_imp, columns=["feature", "importance"])
feat_imp_df = feat_imp_df.sort_values("importance", ascending=False)
print("Most important Features are :")
feat_imp_df = feat_imp_df[feat_imp_df["importance"] >= 1]
sns.barplot(x="importance", y="feature", data=feat_imp_df)
plt.show()
def predict(model, filename):
"""This function takes model and filename and gives predictions csv file"""
predicted_proba = model.predict_proba(X_test)
predicted_proba = np.asarray(predicted_proba)
predicted_proba = predicted_proba[:, 1]
predictions = pd.DataFrame(columns=["ID", "Response"])
predictions["ID"] = test["ID"]
predictions["Response"] = predicted_proba
predictions.to_csv(filename, index=False)
return predictions
predictions = predict(cat_boost, "submission.csv")
predictions.head()
|
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
import torch
from torch.utils.data import DataLoader
from datasets import load_dataset, Dataset, Image
from huggingface_hub import HfApi, HfFolder, notebook_login
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# # 1. Train dataset
# ## 1.1 Load the dataset
ds_train = load_dataset("pphuc25/iBioHash50k_labels_150k")
ds_train = ds_train["train"]
print(ds_train)
# ## 1.2 EDA the train dataset
# ### a. labels count
print("Number of rows in dataset:")
print(len(ds_train))
print("Number of categories in dataset:")
print(len(ds_train.unique("labels")))
ds_train_labels = pd.Series(ds_train["labels"])
labels_count = ds_train_labels.value_counts()
# Sort the count based on the labels
labels_count = labels_count.sort_index()
print(labels_count)
labels_count_df = labels_count.reset_index()
labels_count_df.columns = ["label", "count"]
labels_count_df["per total (%)"] = labels_count_df["count"] / len(ds_train) * 100
labels_count_df.head(10)
### Top 5 most AND least frequent labels
print("Top 5 most counts")
print(labels_count_df.sort_values("count", ascending=False).head(5))
print("\n")
print("Top 5 least counts")
print(labels_count_df.sort_values("count", ascending=True).head(5))
# labels_count_top10.plot(x='label', y='count', kind='bar', legend=False)
# plt.title('Top 10 label counts in training dataset')
# plt.xlabel('Label')
# plt.ylabel('Count')
# plt.show()
# ### b. image shapes
# ds_train = ds_train.with_format('torch', device=device)
ds_train[0]["image"].size
# Define function to get image shape
## When batched=True is used, the function passed to map receives an entire batch at a time
def get_img_shape(samples):
img_shapes = [img.size for img in samples["image"]]
return {"img_shapes": img_shapes}
# By default, map() keeps the original columns alongside the newly computed ones,
# so we drop them via remove_columns
ds_img_shapes = ds_train.map(
get_img_shape, batched=True, num_proc=4, remove_columns=ds_train.column_names
)
np_img_shapes = np.array(ds_img_shapes["img_shapes"])
print(np_img_shapes.shape)
df_img_shapes = pd.DataFrame(np_img_shapes, columns=["w", "h"])
df_img_shapes["aspect_ratio_w:h"] = df_img_shapes["w"] / df_img_shapes["h"]
df_img_shapes.head()
df_img_shapes["aspect_ratio_w:h"].hist(grid=True, bins=50, range=(0, 3.5))
plt.xlabel("aspect ratio (w/h)")
plt.ylabel("count")
plt.show()
df_img_shapes.to_csv("ds_img_shapes.csv")
|
# # Movie Recommender System
# https://www.kaggle.com/datasets/rounakbanik/the-movies-dataset
import os
import pandas as pd
import numpy as np
from surprise import *
from surprise.model_selection import *
import matplotlib.pyplot as plt
# # Loading Dataset
movie_df = pd.read_csv("/kaggle/input/the-movies-dataset/ratings_small.csv")
movie_df
reader = Reader(rating_scale=(0.5, 5.0))
data = Dataset.load_from_df(movie_df[["userId", "movieId", "rating"]], reader)
trainset, testset = train_test_split(data, test_size=0.25)
# **Computing the average MAE and RMSE of Probabilistic Matrix Factorization (PMF), User-based Collaborative Filtering, and Item-based Collaborative Filtering under 5-fold cross-validation**
# Surprise expects user_based (and the similarity name) via sim_options rather than direct kwargs
user_based = KNNBasic(sim_options={"user_based": True})
item_based = KNNBasic(sim_options={"user_based": False})
pmf = SVD(biased=False)
metrics = ["rmse", "mae"]
user_based_results = cross_validate(user_based, data, measures=metrics, cv=5)
item_based_results = cross_validate(item_based, data, measures=metrics, cv=5)
pmf_results = cross_validate(pmf, data, measures=metrics, cv=5)
print("User-based CF RMSE:", user_based_results["test_rmse"].mean())
print("User-based CF MAE:", user_based_results["test_mae"].mean())
print("Item-based CF RMSE:", item_based_results["test_rmse"].mean())
print("Item-based CF MAE:", item_based_results["test_mae"].mean())
print("PMF RMSE:", pmf_results["test_rmse"].mean())
print("PMF MAE:", pmf_results["test_mae"].mean())
# # Cosine, Mean Squared Difference, and Pearson Similarites
user_based_cosine = KNNBasic(sim_options={"user_based": True, "name": "cosine"})
user_based_msd = KNNBasic(sim_options={"user_based": True, "name": "msd"})
user_based_pearson = KNNBasic(sim_options={"user_based": True, "name": "pearson"})
item_based_cosine = KNNBasic(sim_options={"user_based": False, "name": "cosine"})
item_based_msd = KNNBasic(sim_options={"user_based": False, "name": "msd"})
item_based_pearson = KNNBasic(sim_options={"user_based": False, "name": "pearson"})
user_based_cosine_results = cross_validate(
user_based_cosine, data, measures=metrics, cv=5
)
user_based_msd_results = cross_validate(user_based_msd, data, measures=metrics, cv=5)
user_based_pearson_results = cross_validate(
user_based_pearson, data, measures=metrics, cv=5
)
item_based_cosine_results = cross_validate(
item_based_cosine, data, measures=metrics, cv=5
)
item_based_msd_results = cross_validate(item_based_msd, data, measures=metrics, cv=5)
item_based_pearson_results = cross_validate(
item_based_pearson, data, measures=metrics, cv=5
)
user_based_scores = [
user_based_cosine_results,
user_based_msd_results,
user_based_pearson_results,
]
item_based_scores = [
item_based_cosine_results,
item_based_msd_results,
item_based_pearson_results,
]
user_based_scores
x_labels = ["Cosine", "MSD", "Pearson"]
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.boxplot([x["test_rmse"] for x in user_based_scores], labels=x_labels)
plt.title("User-Based")
plt.ylabel("RMSE")
plt.subplot(1, 2, 2)
plt.boxplot([x["test_rmse"] for x in item_based_scores], labels=x_labels)
plt.title("Item-Based")
plt.ylabel("RMSE")
plt.show()
x_labels = ["Cosine", "MSD", "Pearson"]
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.boxplot([x["test_mae"] for x in user_based_scores], labels=x_labels)
plt.title("User-Based")
plt.ylabel("MAE")
plt.subplot(1, 2, 2)
plt.boxplot([x["test_mae"] for x in item_based_scores], labels=x_labels)
plt.title("Item-Based")
plt.ylabel("MAE")
plt.show()
# # K Number of Neighbors
user_k_results = []
item_k_results = []
for k in range(1, 25):
    user_based = KNNWithMeans(k=k, sim_options={"user_based": True})
    item_based = KNNWithMeans(k=k, sim_options={"user_based": False})
    user_results = cross_validate(user_based, data, measures=["rmse"], cv=5)
    item_results = cross_validate(item_based, data, measures=["rmse"], cv=5)
    user_k_results.append(user_results["test_rmse"].mean())
    item_k_results.append(item_results["test_rmse"].mean())
plt.plot(range(1, 25), user_k_results, label="User-based")
plt.plot(range(1, 25), item_k_results, label="Item-based")
plt.xlabel("Number of K")
plt.ylabel("RMSE")
plt.legend()
plt.show()
best_user_k = user_k_results.index(min(user_k_results)) + 1
best_item_k = item_k_results.index(min(item_k_results)) + 1
print(
"Best K for user-based collaborative filtering:",
best_user_k,
"with a score of ",
min(user_k_results),
)
print(
"Best K for item-based collaborative filtering:",
best_item_k,
"with a score of ",
min(item_k_results),
)
|
# # Introduction
# In order to analyze the [stock market database](https://www.kaggle.com/datasets/paultimothymooney/stock-market-data), some preprocessing had to be done before the actual analysis. The database consists of json and csv files arranged in a directory tree, so the nature of the data is not clear at first glance.
# The main goal of this notebook is to create a strategy for efficient data retrieval. To do this we first explore the structure of the directories and then identify the particular features of every file in these directories.
# # Exploratory analysis
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os # directory access and manipulation
import json # json accsess, manipulation and parsing
import csv as csv_manager # csv accsess, manipulation and parsing
# I defined this function to be used as a decorator in order to troubleshoot unexpected runtime errors (like those caused by deletion of directories)
def failproof_rtine(funct):
def wrapper(*args, **kwargs):
try:
return funct(*args, **kwargs)
except Exception as e:
print("In {}:".format(str(funct.__name__)))
print(e)
return wrapper
# ## Database basic structure
# The **stock market data** database is a document based database where the parent directory **stock_market_data** forks into four directories:
# * forbes2000
# * nyse
# * nasdaq
# * sp500
# Each of these directories corresponds to a stock market and is subdivided into two subdirectories:
# **csv** and **json**.
#
# We define this "constants" to be used throughout this notebook
working_directory = "/kaggle/input/stock-market-data/stock_market_data"
stock_exchange_names = ["forbes2000", "nyse", "nasdaq", "sp500"]
# Each of the symbols (securities) in each stock market can be found in both the json and csv directories. The next functions make access to these files easier and help pinpoint possible problems.
##ID 10
# return the names of all available symbols in a stock market (file extension has to be specified)
@failproof_rtine
def get_symbols_market(stock_exchange_name, extension="json"):
current_file_path = "/".join([working_directory, stock_exchange_name, extension])
current_file_names = os.listdir(current_file_path)
if extension == "json":
char_offset = -5
if extension == "csv":
char_offset = -4
return [
current_file_name[0:char_offset] for current_file_name in current_file_names
]
# this function relates stock markets to available symbols in a dictionary
@failproof_rtine
def get_symbols_relation(extension="json"):
return {
stock_exchange_name: get_symbols_market(stock_exchange_name, extension)
for stock_exchange_name in stock_exchange_names
}
# get a comprehensive list of symbols names (disregarding of the stock market)
@failproof_rtine
def get_symbols(extension="json", relation_data=None):
# if the stock market/symbol relation is provided we can skip this step
if relation_data is None:
current_data = get_symbols_relation(extension)
else:
print("cached")
current_data = relation_data
output_data = []
for stock_exchange_name in stock_exchange_names:
output_data += current_data[stock_exchange_name]
# remove duplicates
output_data = list(set(output_data))
output_data.sort()
return output_data
try:
print("We have {} json files".format(len(get_symbols(extension="json"))))
except:
print("No json files found")
try:
print("We have {} csv files".format(len(get_symbols(extension="csv"))))
except:
print("No json files found")
# Depending on the database version one could have more or fewer csv files than json files, which means that some files are (possibly) missing.
# Now, let's take a look on the data that these files have.
@failproof_rtine
def get_symbol_raw_data(
stock_exchange_name, symbol_name, extension="json", verbose=False
):
current_file_path = "".join(
[
"/".join([working_directory, stock_exchange_name, extension, symbol_name]),
".",
extension,
]
)
if extension == "json":
try:
with open(current_file_path) as current_file:
output_data = json.load(current_file)
return output_data
except Exception as e:
if verbose:
print(e)
if extension == "csv":
try:
with open(current_file_path) as current_file:
output_data = csv_manager.DictReader(current_file)
return pd.DataFrame(output_data)
except Exception as e:
if verbose:
print(e)
# let's try this out for the amazon securities in the nasdaq stock market
amazon_json_data = get_symbol_raw_data("nasdaq", "AMZN", "json")
# There is more data under other attributes, but it is too large to be displayed; we will deal with it later
display(amazon_json_data["chart"]["result"][0]["meta"])
# actually most of the dropped attributes can be found in the csv file (as we will show later)
amazon_csv_data = get_symbol_raw_data("nasdaq", "AMZN", "csv")
display(amazon_csv_data)
# The last function allows us to detect problems in data access (if the file we try to access cannot be opened, the output is None)
# We want to know how many files are corrupted according to the following criteria: a file is corrupted if
# * either its json or csv version cannot be opened
# * either its json or csv version does not exist
mismatches = 0
num_scanned_files = 0
for stock_exchange_name in stock_exchange_names:
current_json_filedir = "/".join([working_directory, stock_exchange_name, "json"])
current_csv_filedir = "/".join([working_directory, stock_exchange_name, "csv"])
available_json_files = get_symbols_market(stock_exchange_name, extension="json")
num_scanned_files += len(available_json_files)
print("Scanning the directory:{}".format(stock_exchange_name))
for current_symbol in available_json_files:
# to see the full log set verbose=True
current_json_file_data = get_symbol_raw_data(
stock_exchange_name, current_symbol, "json"
)
current_csv_file_data = get_symbol_raw_data(
stock_exchange_name, current_symbol, "csv"
)
if (current_json_file_data is None) or (current_csv_file_data is None):
mismatches += 1
print("{} mismatches found in {} files".format(mismatches, num_scanned_files))
# print(available_json_files[0:1000])
# Before proceeding with a deeper database analysis, we define a way to pick random files from the database.
##ID 13
@failproof_rtine
def choose_random_symbol(extension="json", relation_data=None):
if relation_data is None:
current_data = get_symbols_relation(extension)
id_stock_market = np.random.randint(low=0, high=len(stock_exchange_names))
id_symbol = np.random.randint(
low=0, high=len(current_data[stock_exchange_names[id_stock_market]])
)
return (
stock_exchange_names[id_stock_market],
current_data[stock_exchange_names[id_stock_market]][id_symbol],
)
# TO DELETE
try:
currrent_stock_exchange_name, current_symbol = choose_random_symbol(extension="csv")
print(
"Showing data from \033[1m{}\033[0m in stock market \033[1m{}\033[0m".format(
current_symbol, currrent_stock_exchange_name
)
)
instance_symbol_data = get_symbol_raw_data(
currrent_stock_exchange_name, current_symbol, "csv", verbose=True
)
display(instance_symbol_data)
except Exception as e:
print(e)
# At this stage several processes require traversing the whole database, which can take a long time. To estimate the run time of a process one can time a smaller portion of it; since this is needed often, a decorator
# avoids code repetition
def timeout_callback(func):
def wrapper(*args, **kwargs):
initial_time = np.datetime64("now")
return_val = func(*args, **kwargs)
ending_time = np.datetime64("now")
print(
"Excecution time: {}ms".format(
(ending_time - initial_time) / np.timedelta64(1, "ms")
)
)
return return_val
return wrapper
# @timeout_callback
# def test_timeout_callback(eq_modulus):
# roots_num=0
# for i in range(1,1000000):
# if (i*i*i+i*i-i)%eq_modulus==1:
# roots_num+=1
# return roots_num
# print(test_timeout_callback(4))
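# Brief usage sketch (an aside, not part of the original flow): wrapping an existing helper lets us
# time a single file read and extrapolate to the full scan.
timed_read = timeout_callback(get_symbol_raw_data)
_ = timed_read("nasdaq", "AMZN", "json")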
# # Dictionary analysis functions
# Going deeper into the structure of the data, we need to identify the possible attributes (features) found in the json files.
# Since we parse the json files into nested (Python) dictionaries, we need to find the possible paths
# in every dictionary. In this context a path is a combination of keys that leads to an atomic data type. For instance the dictionary:
#
#  {'a': {'aa': 2, 'ab': True, 'ac': 1.2},
#  'b': {'ba': -922, 'bb': 0.977, 'bc': True}}
# has the paths
# a/aa, a/ab, a/ac...
# In the first stages of the analysis we append the type to each path in order to determine the final data type(s) used. In this case we get the paths:
# a/aa/Integer, a/ab/Boolean, a/ac/Numeric...
#
# A primitive is defined here as a boolean, string, integer or numeric variable;
# anything else (including na/null) is not considered a primitive.
def is_primitive(input_obj):
return (
isinstance(input_obj, bool)
or isinstance(input_obj, str)
or isinstance(input_obj, int)
or isinstance(input_obj, float)
)
# Identified types include primitive types, na, lists and dictionaries (objects);
# anything else is labeled as unidentified
def is_unidentified_type(input_obj):
return not (
isinstance(input_obj, bool)
or isinstance(input_obj, str)
or isinstance(input_obj, int)
or isinstance(input_obj, float)
or isinstance(input_obj, dict)
or isinstance(input_obj, list)
)
# defines a type to string conversion.
def type_to_str(input_obj):
try:
if input_obj is None:
return "NA"
if isinstance(input_obj, bool):
return "Boolean"
if isinstance(input_obj, str):
return "String"
if isinstance(input_obj, int):
return "Integer"
if isinstance(input_obj, float):
return "Numeric"
if isinstance(input_obj, list):
return [type_to_str(entry) for entry in input_obj]
if isinstance(input_obj, dict):
return {
entry_name: type_to_str(input_obj[entry_name])
for entry_name in input_obj.keys()
}
except Exception as e:
print(e)
print(input_obj)
# there might be other types (like set) which are not considered here
return "Unidentified"
# transforms a nested list (a list which contains other lists) into a plain list
def list_reducer(input_list):
if isinstance(input_list, list):
output = []
for input_list_val in input_list:
if isinstance(input_list_val, list):
output.extend(list_reducer(input_list_val))
else:
output.append(input_list_val)
return output
else:
return input_list
# returns all the typed paths found inside an object (or just the type name for a primitive)
def get_all_paths(input_dict):
paths = []
if is_primitive(input_dict) or input_dict is None:
paths = type_to_str(input_dict)
if isinstance(input_dict, list):
paths = list_reducer(
[get_all_paths(input_dict_val) for input_dict_val in input_dict]
)
if isinstance(input_dict, dict):
for input_key in input_dict.keys():
if is_primitive(input_dict[input_key]) or (input_dict[input_key] is None):
paths.append("/".join([input_key, type_to_str(input_dict[input_key])]))
if isinstance(input_dict[input_key], dict) and (
len(input_dict[input_key].keys()) != 0
):
paths.extend(
[
"/".join([input_key, current_path])
for current_path in get_all_paths(input_dict[input_key])
]
)
elif isinstance(input_dict[input_key], list) and (
len(input_dict[input_key]) != 0
):
paths.extend(
[
"/".join([input_key, current_path])
for current_path in list_reducer(
get_all_paths(input_dict[input_key])
)
]
)
else:
continue
return paths
# drops the duplicate paths returned by the previous function
def get_all_unique_paths(input_dict):
if input_dict is None:
return []
else:
return list(set(get_all_paths(input_dict)))
# Let's try this out on a file
current_symbol_data = get_symbol_raw_data("sp500", "ENS", "json")
get_all_unique_paths(current_symbol_data)
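# As a quick cross-check (a minimal sketch using the toy dictionary from the explanation above),
# the typed paths produced for that example should match the ones listed there.
toy_dict = {
    "a": {"aa": 2, "ab": True, "ac": 1.2},
    "b": {"ba": -922, "bb": 0.977, "bc": True},
}
print(sorted(get_all_unique_paths(toy_dict)))
# expected: ['a/aa/Integer', 'a/ab/Boolean', 'a/ac/Numeric', 'b/ba/Integer', 'b/bb/Numeric', 'b/bc/Boolean']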
# Next, we express a set of paths in a condensed manner: instead of keeping a list of paths, we build a nested dictionary whose endpoints (leaves) are the data types of the attributes represented by those paths.
#
import re
identifiable_types = ["NA", "Boolean", "String", "Integer", "Numeric", "Unidentified"]
def split_path(input_path):
return re.findall(r"[^/]+", input_path)
def is_valid_type(type_strfmt):
return type_strfmt in identifiable_types
def rectify_path(input_path):
keys_list = split_path(input_path)
if len(keys_list) == 0:
return
type_strfmt = keys_list[-1]
if not is_valid_type(type_strfmt):
keys_list.append("NA")
return rectify_path("/".join(keys_list))
return input_path
def initialize_dict_path(input_path, verbose=True):
output_dict = {}
keys_list = split_path(input_path)
if len(keys_list) == 0:
return
elif len(keys_list) == 1:
output_dict = {"Unamed variable": keys_list[-1]}
elif len(keys_list) == 2:
output_dict = {keys_list[0]: keys_list[1]}
else:
key_val_mapping = list(enumerate(keys_list[::-1]))
for key, value in key_val_mapping:
if key == 0:
output_dict = value
else:
if verbose:
print("({},{})".format(key, value))
output_dict = nest_dict({}, output_dict, sub_name=value)
return output_dict
def append_path_dict(input_path, input_dict={}):
output_dict = input_dict.copy()
keys_list = split_path(input_path)
if len(keys_list) == 0:
return
else:
track = []
temp = output_dict
for id_key in range(len(keys_list)):
try:
if keys_list[id_key] in temp.keys():
temp = temp[keys_list[id_key]]
if isinstance(temp, dict):
track.append((keys_list[id_key], temp))
else:
break
else:
break
except Exception as e:
print("append_path_dict error")
print(e)
if not bool(output_dict):
return initialize_dict_path(input_path, verbose=False)
if not id_key == (len(keys_list) - 1):
remaining_keys = keys_list[id_key:]
if len(track) == 0:
output_dict[keys_list[0]] = keys_list[1]
return output_dict
if len(remaining_keys) == 2:
tempy = track.pop()
tempy[1][keys_list[id_key]] = remaining_keys[1]
return output_dict
else:
dict_to_append = initialize_dict_path(
"/".join(remaining_keys[1:]), verbose=False
)
tempy = track.pop()
tempy[1][keys_list[id_key]] = dict_to_append
return output_dict
def nest_dict(input_dict_one, input_dict_two, sub_name="Unnamed"):
output_dict = input_dict_one
output_dict[sub_name] = input_dict_two
return output_dict
def path_decoder(input_path_list):
output_dict = {}
if len(input_path_list) > 0:
input_path_transformed_list = [
rectify_path(input_path) for input_path in input_path_list
]
try:
for current_input_path in input_path_transformed_list:
if len(output_dict.keys()) == 0:
# print(current_input_path)
output_dict = append_path_dict(current_input_path)
else:
temp_dict = output_dict.copy()
output_dict = append_path_dict(current_input_path, temp_dict)
except Exception as e:
print(e)
return output_dict
path_decoder(get_all_unique_paths(current_symbol_data))
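# A minimal sketch on a hand-written list of typed paths (the same toy attributes used in the
# explanation above): the decoder should condense them into a nested dictionary whose leaves
# are the type names.
toy_paths = ["a/aa/Integer", "a/ab/Boolean", "a/ac/Numeric"]
print(path_decoder(toy_paths))
# expected: {'a': {'aa': 'Integer', 'ab': 'Boolean', 'ac': 'Numeric'}}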
# A single file might not be enough to reveal all the possible paths present in the json files (Python dictionaries),
# so we will scan the whole database to find all the possible paths.
@timeout_callback
def find_all_available_paths():
path_set = set()
for stock_exchange_name in stock_exchange_names:
print(stock_exchange_name)
symbols_in_market = get_symbols_market(stock_exchange_name, extension="json")
for current_symbol in symbols_in_market:
current_data = get_symbol_raw_data(
stock_exchange_name, current_symbol, "json"
)
current_data = get_all_unique_paths(current_data)
path_set = path_set.union(set(current_data))
# print(path_set)
return path_set
my_json_paths = find_all_available_paths()
# From this we get a comprehensive list of paths, many of which start with **chart/result/events/splits** or **chart/result/events/dividends**. We will explore those later; for now let's look at the remaining paths
# path_decoder(get_all_unique_paths(kangaroo))
try:
posssible_main_paths = [
current_path
for current_path in my_json_paths
if len(
re.findall(
r"^(chart/result/events/splits|chart/result/events/dividends)",
current_path,
)
)
== 0
]
print(posssible_main_paths)
except Exception as e:
print(e)
# Looking at the paths starting with **chart/result/events/splits** or **chart/result/events/dividends**
# ...
# 'chart/result/events/splits/1402925400/denominator/Integer',
# 'chart/result/events/dividends/1305552600/date/Integer',
# 'chart/result/events/splits/1070461800/numerator/Integer',
# 'chart/result/events/splits/1495719000/numerator/Integer',
# 'chart/result/events/dividends/1224509400/amount/Numeric',
# ...
# we observe a pattern: each of those paths is followed by a number and then a series of attributes; the most interesting part is the attribute name just before the data type. Let us confirm this observation by running the following code
set(
[
"".join((current_path.split("/"))[-2])
for current_path in my_json_paths
if len(
re.findall(
r"^(chart/result/events/splits|chart/result/events/dividends)",
current_path,
)
)
!= 0
]
)
# Further observations allow us to conclude that:
# * every dividend (path starting with chart/result/events/dividends) has a date and an amount
# * every split (path starting with chart/result/events/splits) has a date, a numerator, a denominator and a split ratio
# * the key between the prefix (chart/result/events/splits or chart/result/events/dividends) and **date**, **denominator**, **numerator**, **splitRatio** matches the value of **date** (as we can verify by accessing several files)
# So far we have identified the structure of the json files. The features contained in them will be divided into seven categories (a small sketch of reading a dividends entry follows the list):
# 1. **metadata**: Basic information about the current symbol:
# * chart/result/meta/chartPreviousClose,chart/result/meta/currency
# * chart/result/meta/dataGranularity,chart/result/meta/exchangeName
# * chart/result/meta/exchangeTimezoneName
# * chart/result/meta/firstTradeDate
# * chart/result/meta/gmtoffset,chart/result/meta/instrumentType
# * chart/result/meta/priceHint
# * chart/result/meta/range
# * chart/result/meta/regularMarketPrice
# * chart/result/meta/regularMarketTime
# * chart/result/meta/symbol
# * chart/result/meta/timezone
# * chart/result/meta/validRanges
# 2. **trade data**: the most important variables in trading operations
# * chart/result/timestamp
# * chart/result/indicators/quote/high
# * chart/result/indicators/quote/open
# * chart/result/indicators/quote/close
# * chart/result/indicators/quote/low
# * chart/result/indicators/quote/volume
# * chart/result/indicators/adjclose
# 3. **current trading period pre**:
# * chart/result/meta/currentTradingPeriod/pre/end
# * chart/result/meta/currentTradingPeriod/pre/gmtoffset
# * chart/result/meta/currentTradingPeriod/pre/start
# * chart/result/meta/currentTradingPeriod/pre/timezone
# 4. **current trading period regular**:
# * chart/result/meta/currentTradingPeriod/regular/end
# * chart/result/meta/currentTradingPeriod/regular/gmtoffset
# * chart/result/meta/currentTradingPeriod/regular/start
# * chart/result/meta/currentTradingPeriod/regular/timezone
# 5. **current trading period post** :
# * chart/result/meta/currentTradingPeriod/post/end
# * chart/result/meta/currentTradingPeriod/post/gmtoffset
# * chart/result/meta/currentTradingPeriod/post/start
# * chart/result/meta/currentTradingPeriod/post/timezone
# 6. **dividends**: chart/result/events/dividends
# 7. **splits**: chart/result/events/splits
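# A minimal sketch (with hypothetical values, not taken from any file) of how a dividends entry with
# the structure described above can be turned into a tidy table: each epoch-second key maps to a small
# dictionary whose 'date' field repeats the key, and pd.to_datetime makes it readable.
example_dividends = {
    "1305552600": {"amount": 0.35, "date": 1305552600},
    "1224509400": {"amount": 0.30, "date": 1224509400},
}
example_dividends_df = pd.DataFrame(
    [
        {"date": pd.to_datetime(entry["date"], unit="s"), "amount": entry["amount"]}
        for entry in example_dividends.values()
    ]
)
display(example_dividends_df)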
# # Data extraction
# Now that we have studied the nature and structure of the data, we can define access methods that enable a deeper data analysis.
#
# retrieves data from a (nested) dictionary using a path
def get_attribute_in_path(path_string, input_dict, verbose=False, has_type_suffix=True):
keys_list = path_string.split("/")
    # the data type suffix is removed (if there is one)
if has_type_suffix:
keys_list.pop()
if isinstance(input_dict, list):
return [
get_attribute_in_path(
path_string, input_dict_entry, verbose, has_type_suffix
)
for input_dict_entry in input_dict
]
if len(keys_list) == 1:
if keys_list[0] in input_dict.keys():
return input_dict[keys_list[0]]
else:
current_node = input_dict
for id_key, key in enumerate(keys_list):
if isinstance(current_node, dict):
if key in current_node.keys():
current_node = current_node[key]
else:
if verbose:
print(
"get_attribute_in_path: wrong path {}".format(path_string)
)
return None
elif isinstance(current_node, list):
# print(len(keys_list[id_key+1:])==0)
return [
get_attribute_in_path(
"/".join(keys_list[id_key:]),
current_node_item,
verbose,
has_type_suffix,
)
for current_node_item in current_node
]
else:
return None
return current_node
# this function is defined to be used in other functions that traverse the whole database
def dict_has_attribute(path_string, input_dict, has_type_suffix=True):
outcome = not (
get_attribute_in_path(path_string, input_dict, has_type_suffix=has_type_suffix)
is None
)
return outcome
# It's time to test the new functions
currrent_stock_exchange_name, current_symbol = choose_random_symbol(extension="json")
print(currrent_stock_exchange_name)
print(current_symbol)
current_data_json = get_symbol_raw_data(
currrent_stock_exchange_name, current_symbol, "json"
)
current_data_csv = get_symbol_raw_data(
currrent_stock_exchange_name, current_symbol, "csv"
)
print(
"previous close: {}".format(
get_attribute_in_path(
"chart/result/meta/chartPreviousClose",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"currency: {}".format(
get_attribute_in_path(
"chart/result/meta/currency", current_data_json, has_type_suffix=False
)
)
)
print(
"data granularity: {}".format(
get_attribute_in_path(
"chart/result/meta/dataGranularity",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"exchange name: {}".format(
get_attribute_in_path(
"chart/result/meta/exchangeName", current_data_json, has_type_suffix=False
)
)
)
print(
"timezone name: {}".format(
get_attribute_in_path(
"chart/result/meta/exchangeTimezoneName",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"first trade date: {}".format(
get_attribute_in_path(
"chart/result/meta/firstTradeDate", current_data_json, has_type_suffix=False
)
)
)
print(
"gmtoffset: {}".format(
get_attribute_in_path(
"chart/result/meta/gmtoffset", current_data_json, has_type_suffix=False
)
)
)
print(
"instrument type: {}".format(
get_attribute_in_path(
"chart/result/meta/instrumentType", current_data_json, has_type_suffix=False
)
)
)
print(
"price hint: {}".format(
get_attribute_in_path(
"chart/result/meta/priceHint", current_data_json, has_type_suffix=False
)
)
)
print(
"range: {}".format(
get_attribute_in_path(
"chart/result/meta/range", current_data_json, has_type_suffix=False
)
)
)
print(
"regular market price: {}".format(
get_attribute_in_path(
"chart/result/meta/regularMarketPrice",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"regular market time: {}".format(
get_attribute_in_path(
"chart/result/meta/regularMarketTime",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"symbol: {}".format(
get_attribute_in_path(
"chart/result/meta/symbol", current_data_json, has_type_suffix=False
)
)
)
print(
"timezone: {}".format(
get_attribute_in_path(
"chart/result/meta/timezone", current_data_json, has_type_suffix=False
)
)
)
print(
"valid Ranges: {}".format(
get_attribute_in_path(
"chart/result/meta/validRanges", current_data_json, has_type_suffix=False
)
)
)
# print('timestamp: {}'.format(get_attribute_in_path('chart/result/timestamp',current_data_json,has_type_suffix=False)))
print(
"current trading period post end: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/post/end",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"current trading period post gmtoffset: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/post/gmtoffset",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"current trading period post start: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/post/start",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"current trading period post timezone: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/post/timezone",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"current trading period pre end: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/pre/end",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"current trading period pre gmtoffset: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/pre/gmtoffset",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"current trading period pre start: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/pre/start",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"current trading period pre timezone: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/pre/timezone",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"current trading period regular end: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/regular/end",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"current trading period regular gmtoffset: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/regular/gmtoffset",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"current trading period regular start: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/regular/start",
current_data_json,
has_type_suffix=False,
)
)
)
print(
"current trading period regular timezone: {}".format(
get_attribute_in_path(
"chart/result/meta/currentTradingPeriod/regular/timezone",
current_data_json,
has_type_suffix=False,
)
)
)
current_data_json_splits = get_attribute_in_path(
"chart/result/events/splits", current_data_json, has_type_suffix=False
)
display(current_data_json_splits[0])
current_data_json_dividends = get_attribute_in_path(
"chart/result/events/dividends", current_data_json, has_type_suffix=False
)
display(current_data_json_dividends[0])
current_data_json_date = get_attribute_in_path(
"chart/result/timestamp", current_data_json, has_type_suffix=False
)
current_data_json_high = get_attribute_in_path(
"chart/result/indicators/quote/high", current_data_json, has_type_suffix=False
)
current_data_json_open = get_attribute_in_path(
"chart/result/indicators/quote/open", current_data_json, has_type_suffix=False
)
current_data_json_close = get_attribute_in_path(
"chart/result/indicators/quote/close", current_data_json, has_type_suffix=False
)
current_data_json_low = get_attribute_in_path(
"chart/result/indicators/quote/low", current_data_json, has_type_suffix=False
)
current_data_json_volume = get_attribute_in_path(
"chart/result/indicators/quote/volume", current_data_json, has_type_suffix=False
)
current_data_json_adjclose = get_attribute_in_path(
"chart/result/indicators/adjclose/adjclose",
current_data_json,
has_type_suffix=False,
)
try:
current_data_json_close = list_reducer(current_data_json_close)
current_data_json_open = list_reducer(current_data_json_open)
current_data_json_high = list_reducer(current_data_json_high)
current_data_json_low = list_reducer(current_data_json_low)
current_data_json_volume = list_reducer(current_data_json_volume)
current_data_json_adjclose = list_reducer(current_data_json_adjclose)
current_data_json_date = list_reducer(current_data_json_date)
    # this has the same information as the csv file (possibly more), if the csv exists
current_data_csv_alt = pd.DataFrame(
{
"date": pd.to_datetime(current_data_json_date, unit="s"),
"low": current_data_json_low,
"open": current_data_json_open,
"volume": current_data_json_volume,
"high": current_data_json_high,
"close": current_data_json_close,
"adjclose": current_data_json_adjclose,
}
)
display(current_data_csv_alt.tail(10))
except Exception as e:
print(e)
try:
# note that current_data_csv could be None (if no csv was found)
display(current_data_csv.tail(10))
except Exception as e:
print(e)
# this method improves on the previous ones in that its output is closer to what we would expect
def path_query_debug(input_dict, path_name, label):
try:
        # the list reducer gets rid of redundant nesting (turning things like [[[9,0]]] into [9,0])
output_data = list_reducer(
get_attribute_in_path(path_name, input_dict, has_type_suffix=False)
)
        # the previous methods struggle with attributes that contain lists; this part fixes that
if isinstance(output_data, list):
            # In most cases the output of the previous methods is a list with one element. In those cases we usually expect
            # only that value as output
if len(output_data) == 1:
output_data = output_data[0]
if output_data is None:
error_found = True
else:
error_found = False
except Exception as e:
print(e)
output_data = None
error_found = True
return error_found, label, output_data
path_query_debug(current_data_json, "chart/result/meta/symbol", "symbol")
# This function is mainly used to spot errors in data extraction; here is one way in which it can be used
def get_data_json_final(input_dict):
output_data = []
output_data.append(
path_query_debug(
input_dict, "chart/result/meta/chartPreviousClose", "previous close"
)
)
output_data.append(
path_query_debug(input_dict, "chart/result/meta/currency", "currency")
)
output_data.append(
path_query_debug(
input_dict, "chart/result/meta/dataGranularity", "data granularity"
)
)
output_data.append(
path_query_debug(input_dict, "chart/result/meta/exchangeName", "exchange name")
)
output_data.append(
path_query_debug(
input_dict, "chart/result/meta/exchangeTimezoneName", "timezone name"
)
)
output_data.append(
path_query_debug(
input_dict, "chart/result/meta/firstTradeDate", "first trade date"
)
)
output_data.append(
path_query_debug(input_dict, "chart/result/meta/gmtoffset", "gmtoffset")
)
output_data.append(
path_query_debug(
input_dict, "chart/result/meta/instrumentType", "instrument type"
)
)
output_data.append(
path_query_debug(input_dict, "chart/result/meta/priceHint", "price hint")
)
output_data.append(path_query_debug(input_dict, "chart/result/meta/range", "range"))
output_data.append(
path_query_debug(
input_dict, "chart/result/meta/regularMarketPrice", "regular market price"
)
)
output_data.append(
path_query_debug(
input_dict, "chart/result/meta/regularMarketTime", "regular market time"
)
)
output_data.append(
path_query_debug(input_dict, "chart/result/meta/symbol", "symbol")
)
output_data.append(
path_query_debug(input_dict, "chart/result/meta/timezone", "timezone")
)
output_data.append(
path_query_debug(input_dict, "chart/result/meta/validRanges", "valid Ranges")
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/post/end",
"current trading period post end",
)
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/post/gmtoffset",
"current trading period post gmtoffset",
)
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/post/start",
"current trading period post start",
)
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/post/timezone",
"current trading period post timezone",
)
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/pre/end",
"current trading period pre end",
)
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/pre/gmtoffset",
"current trading period pre gmtoffset",
)
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/pre/start",
"current trading period pre start",
)
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/pre/timezone",
"current trading period pre timezone",
)
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/regular/end",
"current trading period regular end",
)
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/regular/gmtoffset",
"current trading period regular gmtoffset",
)
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/regular/start",
"current trading period regular start",
)
)
output_data.append(
path_query_debug(
input_dict,
"chart/result/meta/currentTradingPeriod/regular/timezone",
"current trading period regular timezone",
)
)
output_data.append(
path_query_debug(
current_data_json, "chart/result/events/dividends", "dividends"
)
)
output_data.append(
path_query_debug(current_data_json, "chart/result/events/splits", "splits")
)
return output_data
pd.DataFrame(
get_data_json_final(current_data_json), columns=["Error", "Field", "Value"]
)
# Finally, using everything we know about the database structure, we define the functions used for data extraction.
def path_query(input_dict, path_name, label):
try:
output_data = list_reducer(
get_attribute_in_path(path_name, input_dict, has_type_suffix=False)
)
if isinstance(output_data, list):
if len(output_data) == 1:
output_data = output_data[0]
except Exception as e:
print(e)
output_data = None
return label, output_data
def get_json_metadata(stock_exchange_name, symbol):
    # use the function arguments (not the global variables) so any symbol can be queried
    input_dict = get_symbol_raw_data(stock_exchange_name, symbol, "json")
output_data = []
output_data.append(
path_query(input_dict, "chart/result/meta/chartPreviousClose", "previous close")
)
output_data.append(path_query(input_dict, "chart/result/meta/currency", "currency"))
output_data.append(
path_query(input_dict, "chart/result/meta/dataGranularity", "data granularity")
)
output_data.append(
path_query(input_dict, "chart/result/meta/exchangeName", "exchange name")
)
output_data.append(
path_query(
input_dict, "chart/result/meta/exchangeTimezoneName", "timezone name"
)
)
output_data.append(
path_query(input_dict, "chart/result/meta/firstTradeDate", "first trade date")
)
output_data.append(
path_query(input_dict, "chart/result/meta/gmtoffset", "gmtoffset")
)
output_data.append(
path_query(input_dict, "chart/result/meta/instrumentType", "instrument type")
)
output_data.append(
path_query(input_dict, "chart/result/meta/priceHint", "price hint")
)
output_data.append(path_query(input_dict, "chart/result/meta/range", "range"))
output_data.append(
path_query(
input_dict, "chart/result/meta/regularMarketPrice", "regular market price"
)
)
output_data.append(
path_query(
input_dict, "chart/result/meta/regularMarketTime", "regular market time"
)
)
output_data.append(path_query(input_dict, "chart/result/meta/symbol", "symbol"))
output_data.append(path_query(input_dict, "chart/result/meta/timezone", "timezone"))
output_data.append(
path_query(input_dict, "chart/result/meta/validRanges", "valid Ranges")
)
output_data = {
output_data_entry[0]: output_data_entry[1] for output_data_entry in output_data
}
return output_data
# type_period must be pre, regular or post
def get_trading_period(stock_exchange_name, symbol, type_period):
    input_dict = get_symbol_raw_data(stock_exchange_name, symbol, "json")
string_query = "".join(["chart/result/meta/currentTradingPeriod/", type_period])
type_period_label = "".join([type_period, " trading period"])
return path_query(input_dict, string_query, type_period_label)[1]
def get_symbol_splits(stock_exchange_name, symbol):
    input_dict = get_symbol_raw_data(stock_exchange_name, symbol, "json")
return path_query(input_dict, "chart/result/events/splits", "splits")[1]
def get_symbol_dividends(stock_exchange_name, symbol):
    input_dict = get_symbol_raw_data(stock_exchange_name, symbol, "json")
return path_query(input_dict, "chart/result/events/dividends", "dividends")[1]
def get_trading_data(stock_exchange_name, symbol):
    input_dict = get_symbol_raw_data(stock_exchange_name, symbol, "json")
input_dict_date = get_attribute_in_path(
"chart/result/timestamp", input_dict, has_type_suffix=False
)
input_dict_high = get_attribute_in_path(
"chart/result/indicators/quote/high", input_dict, has_type_suffix=False
)
input_dict_open = get_attribute_in_path(
"chart/result/indicators/quote/open", input_dict, has_type_suffix=False
)
input_dict_close = get_attribute_in_path(
"chart/result/indicators/quote/close", input_dict, has_type_suffix=False
)
input_dict_low = get_attribute_in_path(
"chart/result/indicators/quote/low", input_dict, has_type_suffix=False
)
input_dict_volume = get_attribute_in_path(
"chart/result/indicators/quote/volume", input_dict, has_type_suffix=False
)
input_dict_adjclose = get_attribute_in_path(
"chart/result/indicators/adjclose/adjclose", input_dict, has_type_suffix=False
)
input_dict_close = list_reducer(input_dict_close)
input_dict_open = list_reducer(input_dict_open)
input_dict_high = list_reducer(input_dict_high)
input_dict_low = list_reducer(input_dict_low)
input_dict_volume = list_reducer(input_dict_volume)
input_dict_adjclose = list_reducer(input_dict_adjclose)
input_dict_date = list_reducer(input_dict_date)
output_data = pd.DataFrame(
{
"date": pd.to_datetime(input_dict_date, unit="s"),
"low": input_dict_low,
"open": input_dict_open,
"volume": input_dict_volume,
"high": input_dict_high,
"close": input_dict_close,
"adjclose": input_dict_adjclose,
}
)
return output_data
display(get_json_metadata(currrent_stock_exchange_name, current_symbol))
display(get_trading_period(currrent_stock_exchange_name, current_symbol, "pre"))
display(get_trading_period(currrent_stock_exchange_name, current_symbol, "regular"))
display(get_trading_period(currrent_stock_exchange_name, current_symbol, "post"))
display(get_symbol_splits(currrent_stock_exchange_name, current_symbol))
display(get_symbol_dividends(currrent_stock_exchange_name, current_symbol))
display(get_trading_data(currrent_stock_exchange_name, current_symbol))
# # Notes
# One could use the pd.to_datetime function to transform the Unix timestamps into readable datetime
# objects (see also Python's [time](https://docs.python.org/3/library/time.html) module), like this
data_json_trad_pre = get_trading_period(
currrent_stock_exchange_name, current_symbol, "pre"
)
data_json_trad_pre_start = pd.to_datetime(data_json_trad_pre["start"], unit="s")
print("trade pre trading period start {}".format(data_json_trad_pre_start))
print(
"trade pre trading period start {} (in Hour:Minute format)".format(
data_json_trad_pre_start.strftime("%H:%M")
)
)
# Use the gmtoffset feature to standardize all the times to GMT+0:
data_json_trad_pre_start = pd.to_datetime(
data_json_trad_pre["start"] + data_json_trad_pre["gmtoffset"], unit="s"
)
print("trade pre trading period start {}".format(data_json_trad_pre_start))
print(
"trade pre trading period start {} (in Hour:Minute format)".format(
data_json_trad_pre_start.strftime("%H:%M")
)
)
# Or like this
def test_dividends(dividends):
if not (dividends is None):
for dividend_date in dividends.keys():
print(
"date: {}, amount:{}".format(
pd.to_datetime(dividend_date, unit="s"),
dividends[dividend_date]["amount"],
)
)
def test_splits(splits):
if not (splits is None):
for split_date in splits.keys():
print(
"date: {}, numerator:{}, denominator:{}, split ratio:{}".format(
pd.to_datetime(split_date, unit="s"),
splits[split_date]["numerator"],
splits[split_date]["denominator"],
splits[split_date]["splitRatio"],
)
)
test_dividends(get_symbol_dividends(currrent_stock_exchange_name, current_symbol))
test_splits(get_symbol_splits(currrent_stock_exchange_name, current_symbol))
|
# Some imports first
import numpy as np # for storing imaging data as matrices
import matplotlib.pyplot as plt # for basic plotting
import os
import copy
import cv2
import torch
try:
import lightning as L
from lion_pytorch import Lion
from segmentation_models_pytorch import Unet
except:
# https://github.com/Lightning-AI/lightning-flash/issues/1489
# https://www.kaggle.com/code/jirkaborovec/tract-segm-baseline-flash-unet-albumentation
import lightning as L
from lion_pytorch import Lion
from segmentation_models_pytorch import Unet
import albumentations as A
from albumentations.pytorch import ToTensorV2
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torchmetrics
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
X_train = np.load("/kaggle/input/scml-segmentation-challenge/X_train.npy")
y_train = np.load("/kaggle/input/scml-segmentation-challenge/y_train.npy")
X_test = np.load("/kaggle/input/scml-segmentation-challenge/X_test.npy")
print(f"Train images shape:{X_train.shape}")
print(f"Train masks shape:{y_train.shape}")
print(f"Test shape:{X_test.shape}")
def transparent_cmap(cmap, N=255):
"Copy colormap and set alpha values"
mycmap = copy.deepcopy(cmap)
mycmap._init()
mycmap._lut[:, -1] = np.linspace(0, 0.8, N + 4)
return mycmap
# Use base cmap to create transparent
mycmap = transparent_cmap(plt.cm.Reds)
def visualise_pred(img, seg, seg2=None):
# Creating kidney masks
kidney_mask = copy.deepcopy(seg)
# Plotting the CT scan itself
plt.imshow(img, cmap="gray")
    # Plotting predictions
plt.contourf(
range(0, 512), range(0, 512), np.reshape(seg, (512, 512)), 15, cmap=mycmap
)
plt.contour(seg > 0.5, [0.5], linewidths=1.5, colors="white")
    # Adding contours
plt.contour(kidney_mask, linewidths=1, colors=["cyan"])
if seg2 is not None:
plt.contour(seg2, linewidths=1, colors=["red"])
plt.tight_layout()
plt.axis("off")
def visualise_predictions(X, y, predictions, size=16):
    # pick `size` random sample indices once and display them in a square grid
    indices = np.random.randint(0, len(X), size=size)
    plt.figure(figsize=(12, 12))
    for i, index in enumerate(indices):
        plt.subplot(int(np.sqrt(size)), int(np.sqrt(size)), i + 1)
# Creating kidney masks
kidney_mask = copy.deepcopy(y[index])
kidney_mask[kidney_mask != 1] = 0
# Creating tumor masks
tumor_mask = copy.deepcopy(y[index])
tumor_mask[tumor_mask != 2] = 0
# Plotting the CT scan itself
plt.imshow(X[index], cmap="gray")
        # Plotting predictions
plt.contourf(
range(0, 512),
range(0, 512),
np.reshape(predictions[index], (512, 512)),
15,
cmap=mycmap,
)
plt.contour(predictions[index] > 0.5, [0.5], linewidths=1.5, colors="white")
        # Adding contours
plt.contour(kidney_mask, linewidths=1, colors=["cyan"])
plt.contour(tumor_mask, linewidths=1, colors=["red"])
plt.tight_layout()
plt.axis("off")
plt.subplots_adjust(wspace=0.01, hspace=0.01)
return None
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(
X_train.astype(int), y_train.astype(int), test_size=0.05, random_state=42
)
X_train.shape, X_val.shape, y_train.shape, y_val.shape
np.unique(y_train) # 0, 1, 2
# https://www.kaggle.com/code/jirkaborovec/tract-segm-eda-flash-deeplab-albumentation#Lightning%E2%9A%A1Flash-&-DeepLab-v3-&-albumentations
class NormImage(A.DualTransform):
def __init__(
self, quantile: float = 0.01, norm: bool = True, always_apply=False, p=1
):
super().__init__(always_apply, p)
self.quantile = quantile
self.norm = norm
def apply(self, img, **params):
if self.quantile > 0:
q_low, q_high = np.percentile(
img, [self.quantile * 100, (1 - self.quantile) * 100]
)
img = np.clip(img, q_low, q_high)
if self.norm:
v_min, v_max = np.min(img), np.max(img)
img = (img - v_min) / float(v_max - v_min)
return img
def apply_to_mask(self, mask, **params):
        # the mask is returned unchanged (only the image is clipped and normalized)
return mask
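# Quick sanity check (a minimal sketch on synthetic data, not part of the pipeline): after NormImage
# the pixel values should lie in [0, 1], with the most extreme quantiles clipped away first.
_demo_img = np.random.normal(loc=100.0, scale=25.0, size=(64, 64)).astype(np.float32)
_demo_out = NormImage(always_apply=True)(image=_demo_img)["image"]
print(_demo_out.min(), _demo_out.max())  # expected: 0.0 1.0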
class SegmentationDataset(Dataset):
def __init__(self, images, masks=None, transforms=None):
self.images = images
self.masks = masks
self.transforms = transforms
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
image = self.images[idx]
if self.masks is not None:
mask = self.masks[idx]
transformed = self.transforms(image=image, mask=mask)
image = transformed["image"]
mask = transformed["mask"]
# mask = mask.unsqueeze(0)
mask = F.one_hot(mask.long(), num_classes=3).permute(2, 0, 1)
return image.float(), mask.float()
else:
transformed = self.transforms(image=image)
return transformed["image"].float()
image_size = (512, 512)
train_transforms = A.Compose(
[
NormImage(always_apply=True),
A.Resize(*image_size),
A.VerticalFlip(p=0.5),
A.HorizontalFlip(p=0.5),
A.ShiftScaleRotate(shift_limit=0.15, scale_limit=0.05, rotate_limit=15, p=0.5),
A.RandomGamma(gamma_limit=(90, 110), p=0.5),
A.ElasticTransform(
alpha=1,
sigma=50,
alpha_affine=50,
interpolation=cv2.INTER_LINEAR,
border_mode=cv2.BORDER_REFLECT_101,
p=0.5,
),
A.GaussianBlur(blur_limit=(3, 7), p=0.5),
ToTensorV2(),
]
)
val_transforms = A.Compose(
[
NormImage(always_apply=True),
A.Resize(*image_size),
ToTensorV2(),
]
)
train_dataloader = DataLoader(
SegmentationDataset(X_train, y_train, train_transforms),
batch_size=8,
shuffle=True,
num_workers=2,
)
val_dataloader = DataLoader(
SegmentationDataset(X_val, y_val, val_transforms),
batch_size=8,
shuffle=False,
num_workers=2,
)
# Just checking the dataloader
def show_batch(data_loader, batch, class_id=1):
import itertools
from torchvision.utils import draw_segmentation_masks, make_grid
plt.rcParams["figure.figsize"] = [20, 15]
images, masks = list(itertools.islice(data_loader, batch, batch + 1))[0]
masks_list = []
for image, mask in zip(images, masks):
image = image.repeat_interleave(3, dim=0)
masked = draw_segmentation_masks(
(image * 255).byte(), mask[class_id, ...].bool(), alpha=0.5, colors="red"
)
masks_list.append(masked)
print("image shape (NB! dim=0 repeated 3x)", image.shape)
print("mask shape", mask.shape)
grid = make_grid(masks_list, nrow=6)
plt.imshow(grid.permute(1, 2, 0))
plt.show()
# print('train class 1')
# show_batch(train_dataloader, batch=0)
# print('train class 2')
# show_batch(train_dataloader, batch=0, class_id=2)
import torch.nn as nn
from torchmetrics import Metric
from composer import Trainer
from composer.models import ComposerModel
from composer.optim import DecoupledAdamW
from composer.metrics.metrics import Dice
# https://docs.mosaicml.com/en/latest/examples/medical_image_segmentation.html
class LossMetric(Metric):
"""Turns any torch.nn Loss Module into distributed torchmetrics Metric."""
def __init__(self, loss, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.loss = loss
self.add_state("sum_loss", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total_batches", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, preds, target):
"""Update the state with new predictions and targets."""
# Loss calculated over samples/batch, accumulate loss over all batches
self.sum_loss += self.loss(preds, target)
self.total_batches += 1
def compute(self):
"""Aggregate state over all processes and compute the metric."""
# Return average loss over entire validation dataset
return self.sum_loss / self.total_batches
#### https://github.com/hubutui/DiceLoss-PyTorch/blob/master/loss.py ####
class BinaryDiceLoss(nn.Module):
"""Dice loss of binary class
Args:
smooth: A float number to smooth loss, and avoid NaN error, default: 1
p: Denominator value: \sum{x^p} + \sum{y^p}, default: 2
predict: A tensor of shape [N, *]
target: A tensor of shape same with predict
reduction: Reduction method to apply, return mean over batch if 'mean',
return sum if 'sum', return a tensor of shape [N,] if 'none'
Returns:
Loss tensor according to arg reduction
Raise:
Exception if unexpected reduction
"""
def __init__(self, smooth=1, p=2, reduction="mean"):
super(BinaryDiceLoss, self).__init__()
self.smooth = smooth
self.p = p
self.reduction = reduction
def forward(self, predict, target):
assert (
predict.shape[0] == target.shape[0]
), "predict & target batch size don't match"
predict = predict.contiguous().view(predict.shape[0], -1)
target = target.contiguous().view(target.shape[0], -1)
num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth
den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1) + self.smooth
loss = 1 - num / den
if self.reduction == "mean":
return loss.mean()
elif self.reduction == "sum":
return loss.sum()
elif self.reduction == "none":
return loss
else:
raise Exception("Unexpected reduction {}".format(self.reduction))
class DiceLoss(nn.Module):
"""Dice loss, need one hot encode input
Args:
weight: An array of shape [num_classes,]
ignore_index: class index to ignore
predict: A tensor of shape [N, C, *]
target: A tensor of same shape with predict
other args pass to BinaryDiceLoss
Return:
same as BinaryDiceLoss
"""
def __init__(self, weight=None, ignore_index=None, **kwargs):
super(DiceLoss, self).__init__()
self.kwargs = kwargs
self.weight = weight
self.ignore_index = ignore_index
def forward(self, predict, target):
assert predict.shape == target.shape, "predict & target shape do not match"
dice = BinaryDiceLoss(**self.kwargs)
total_loss = 0
predict = F.softmax(predict, dim=1)
for i in range(target.shape[1]):
if i != self.ignore_index:
dice_loss = dice(predict[:, i], target[:, i])
if self.weight is not None:
assert (
self.weight.shape[0] == target.shape[1]
), "Expect weight shape [{}], get[{}]".format(
target.shape[1], self.weight.shape[0]
)
                    dice_loss *= self.weight[i]
total_loss += dice_loss
return total_loss / target.shape[1]
#######################################################################################
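# Quick sanity check (a minimal sketch, not part of the training pipeline): DiceLoss should accept
# logits of shape [N, C, H, W] together with a one-hot target of the same shape and return a scalar.
_logits = torch.randn(2, 3, 8, 8)
_targets = F.one_hot(torch.randint(0, 3, (2, 8, 8)), num_classes=3).permute(0, 3, 1, 2).float()
print(DiceLoss(ignore_index=0)(_logits, _targets))  # a 0-dim tensor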
class SegModel(ComposerModel):
def __init__(
self,
encoder_name="resnet34",
encoder_weights="imagenet",
in_channels=1,
classes=3,
loss=DiceLoss(ignore_index=0),
):
super().__init__()
self.model = Unet(
encoder_name=encoder_name,
encoder_weights=encoder_weights, # use `imagenet` pre-trained weights for encoder initialization
in_channels=in_channels, # model input channels (1 for gray-scale images, 3 for RGB, etc.)
classes=classes, # model output channels (number of classes in your dataset)
)
self.criterion = loss
self.train_loss = LossMetric(loss)
self.train_dice = Dice(num_classes=classes - 1)
self.val_loss = LossMetric(loss)
self.val_dice = Dice(num_classes=classes - 1)
def forward(self, batch):
image, target = batch
return self.model(image)
def loss(self, outputs, batch):
_, targets = batch
return self.criterion(outputs, targets)
def get_metrics(self, is_train: bool = False):
if is_train:
return {"DiceLoss": self.train_loss, "Dice": self.train_dice}
else:
return {"DiceLoss": self.val_loss, "Dice": self.val_dice}
def update_metric(self, batch, outputs, metric):
_, targets = batch
metric.update(outputs, targets)
epochs = 150
model = SegModel()
test_input = torch.from_numpy(X_train[0].astype(int)).float().unsqueeze(0).unsqueeze(0)
test_pred = model([test_input, None])
test_pred.shape, test_pred.dtype, test_input.shape
from composer.algorithms import (
SqueezeExcite,
CutOut,
LabelSmoothing,
ProgressiveResizing,
ChannelsLast,
SAM,
GradientClipping,
BlurPool,
GyroDropout,
)
from composer.optim import DecoupledAdamW
from composer.loggers import InMemoryLogger
from composer.optim.scheduler import CosineAnnealingWithWarmupScheduler
logger = InMemoryLogger()
optimizer = DecoupledAdamW(model.parameters(), lr=1e-3)
blurpool = BlurPool(replace_convs=True, replace_maxpools=True)
progressive_resizing_algorithm = ProgressiveResizing(
mode="resize", initial_scale=0.5, resize_targets=True, size_increment=32
)
scheduler = CosineAnnealingWithWarmupScheduler(t_warmup="5ep")
algorithms = [
SqueezeExcite(min_channels=128, latent_channels=64),
progressive_resizing_algorithm,
ChannelsLast(),
SAM(),
CutOut(length=0.5),
LabelSmoothing(smoothing=0.1),
blurpool,
GyroDropout(p=0.5, sigma=1024, tau=8),
]
# trainer = L.Trainer(max_epochs=epochs, accelerator='gpu', devices=torch.cuda.device_count(), precision=16, callbacks=[checkpoint_callback])
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=val_dataloader,
max_duration="100ep",
optimizers=optimizer,
device="gpu",
precision="amp",
seed=69,
algorithms=algorithms,
loggers=[logger],
schedulers=scheduler,
)
trainer.fit()
def plot_history(logger, metric="metrics/eval/DiceLoss"):
timeseries = logger.data[metric]
item = []
for time in timeseries:
try:
item.append(time[1].cpu().numpy())
except:
item.append(time[1])
plt.plot(item)
plt.title(f"metric: {metric}")
plt.show()
# plot_history(logger)
# plot_history(logger, metric='metrics/eval/Dice')
# from lightning.pytorch.tuner.tuning import Tuner
# tuner = Tuner(trainer)
# lr_find = tuner.lr_find(model, min_lr=1e-8, max_lr=1, early_stop_threshold=None)
# print(lr_find.suggestion())
# lr_find.plot(suggest=True)
# 0.0007585775750291836
from numpy.random import Generator, PCG64
# your great code for segmenting tumors comes here
# at the moment I am just assigning random masks
# from `y_train` to test images.
def segment_tumors(images):
rng = Generator(PCG64(1))
tumors_segmented = y_train[
rng.choice(range(y_train.shape[0]), size=images.shape[0]), :, :
]
return tumors_segmented
predictions_test = segment_tumors(X_test)
predictions_test.shape
from tqdm import tqdm
# predictions_test = np.empty_like(predictions_test)
test_dataset = SegmentationDataset(X_test, transforms=val_transforms)
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=2)
model.eval()
predictions = []
with torch.no_grad():
for (
image
) in test_dataloader: # tqdm(test_dataloader, desc="Inference", unit="batch"):
# image = image.to(model.device)
image = image.cuda()
logits = model([image, None])
probs = torch.sigmoid(logits)
# (probs > 0.5).int()
predictions.extend(probs.cpu().numpy()) # .squeeze(0))
predictions = np.array(predictions)
num_val_samples = 5
val_dataset = SegmentationDataset(X_val, y_val, transforms=val_transforms)
val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=2)
# Load a few examples from the val_dataloader
val_iter = iter(val_dataloader)
val_examples = [next(val_iter) for _ in range(num_val_samples)]
# Run inference on the validation examples
model.eval()
val_predictions = []
threshold = 0.5
with torch.no_grad():
for i, batch in enumerate(val_dataloader):
image, mask = batch
image = image.cuda() # .to(model.device)
logits = model([image, None])
probs = torch.sigmoid(logits)
# preds = torch.argmax(probs, dim=0)
# preds = (probs > threshold).int()
val_predictions.extend(probs.cpu().numpy())
if i == 5:
break
val_predictions = np.array(val_predictions)
# Visualize the validation examples along with their ground truth and predicted segmentation masks
# for i in range(num_val_samples):
# img, mask = val_examples[i]
# img = img.squeeze().numpy()
# mask = mask.squeeze().numpy()[2, ...]
# pred = val_predictions[i, 2, ...]
# print(pred.shape)
# plt.figure(figsize=(12, 4))
# plt.subplot(1, 3, 1)
# plt.imshow(img, cmap='gray')
# plt.title("Original Image")
# plt.axis('off')
# plt.subplot(1, 3, 2)
# visualise_pred(img, pred>0.5, mask)
# plt.title("Prediction (cyan) \w mask (red)")
# plt.axis('off')
# plt.subplot(1, 3, 3)
# visualise_pred(img, pred>0.6, mask)
# plt.title("Prediction (cyan) \w mask (red)")
# plt.axis('off')
# plt.tight_layout()
# plt.show()
predictions.shape
num_samples = 5
import random
random_indices = random.sample(range(len(X_test)), num_samples)
# Visualize the predictions for the selected indices
# for idx in random_indices:
# img = X_test[idx]
# seg = predictions[idx][2,...]
# plt.figure(figsize=(5, 5))
# visualise_pred(img, seg>0.6)
# plt.title(f"Index: {idx}")
# plt.show()
# Now that we have segmented all the tumors, we are ready to submit our solution. The competition uses [run-length encoding](https://en.wikipedia.org/wiki/Run-length_encoding) to encode the binary predictions and evaluate solutions. Below are the functions needed to convert the segmentations into the required format.
# ref.: https://www.kaggle.com/stainsby/fast-tested-rle
def mask2rle(img):
"""
img: numpy array, 1 - mask, 0 - background
Returns run length as string formatted
"""
pixels = img.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return " ".join(str(x) for x in runs)
# test this function on the first training mask
# mask2rle(predictions[0]>0.5)
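# A tiny worked example (a minimal sketch on a hand-made 3x3 mask, not competition data): pixels are
# flattened row by row and each run of ones is written as "start length" with 1-based positions.
_toy_mask = np.array([[0, 1, 1],
                      [0, 0, 1],
                      [1, 0, 0]])
print(mask2rle(_toy_mask))  # expected: "2 2 6 2"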
predictions.shape
output_preds = predictions[:, 2, ...]
output_preds.shape
def create_submission(binary_predictions, filename):
predictions_rles = [
(str(i) + "," + mask2rle(mask)) for i, mask in enumerate(binary_predictions)
]
with open(filename + ".csv", "w") as solution_file:
solution_file.write("Id,Predicted\n")
for i, string in enumerate(predictions_rles):
solution_file.write(string + "\n")
for i in np.arange(0.1, 1, 0.1):
create_submission(output_preds > i, f"sample_submission_{str(i)}")
create_submission(output_preds > 0.5, "sample_submission_0.5")
|
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image
from keras import layers, models, optimizers
from keras.preprocessing.image import ImageDataGenerator
def display_grid(data, path, w=10, h=10, columns=4, rows=5):
fig = plt.figure(figsize=(12, 8))
for i in range(1, columns * rows + 1):
file = data[i]
file = os.path.join(path, file)
img = Image.open(file)
fig.add_subplot(rows, columns, i)
imshow(img)
plt.show()
def plot_results(history):
acc = history.history["acc"]
val_acc = history.history["val_acc"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.figure(figsize=(24, 6))
plt.subplot(1, 2, 1)
plt.plot(epochs, acc, "b", label="Training Accuracy")
plt.plot(epochs, val_acc, "r", label="Validation Accuracy")
plt.grid(True)
plt.legend()
plt.xlabel("Epoch")
plt.subplot(1, 2, 2)
plt.plot(epochs, loss, "b", label="Training Loss")
plt.plot(epochs, val_loss, "r", label="Validation Loss")
plt.grid(True)
plt.legend()
plt.xlabel("Epoch")
plt.show()
def get_best_epoch(history):
valid_acc = history.history["val_acc"]
best_epoch = valid_acc.index(max(valid_acc)) + 1
best_acc = max(valid_acc)
print(
"Best Validation Accuracy Score {:0.5f}, is for epoch {}".format(
best_acc, best_epoch
)
)
return best_epoch
# ## Class Swift
base_dir = "/kaggle/input/cars-wagonr-swift/data/"
train_swift = os.listdir(os.path.join(base_dir, "train/swift"))
val_swift = os.listdir(os.path.join(base_dir, "validation/swift"))
test_swift = os.listdir(os.path.join(base_dir, "test/swift"))
print(
"Instances for Class Swift: Train {}, Validation {} Test {}".format(
len(train_swift), len(val_swift), len(test_swift)
)
)
# Sanity checks: no overlapping between train, test and validation sets
val_train = [x for x in val_swift if x in train_swift]
test_train = [x for x in test_swift if x in train_swift]
val_test = [x for x in test_swift if x in val_swift]
len(val_train), len(test_train), len(val_test)
display_grid(
data=train_swift,
path=os.path.join(base_dir, "train/swift"),
w=10,
h=10,
columns=8,
rows=5,
)
# ## Class Wagonr
train_wr = os.listdir(os.path.join(base_dir, "train/wagonr"))
val_wr = os.listdir(os.path.join(base_dir, "validation/wagonr"))
test_wr = os.listdir(os.path.join(base_dir, "test/wagonr"))
print(
    "Instances for Class Wagonr: Train {}, Validation {} Test {}".format(
        len(train_wr), len(val_wr), len(test_wr)
    )
)
# Sanity checks: no overlapping between train, test and validation sets
val_train = [x for x in val_wr if x in train_wr]
test_train = [x for x in test_wr if x in train_wr]
val_test = [x for x in test_wr if x in val_wr]
len(val_train), len(test_train), len(val_test)
display_grid(
data=train_wr,
path=os.path.join(base_dir, "train/wagonr"),
w=10,
h=10,
columns=8,
rows=5,
)
# ## Train using Convolution:
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dropout, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

# A small convolutional network for the binary swift-vs-wagonr task,
# matching the 150x150 RGB images produced by the generators below.
dropout_rate = 0.5
X_in = Input(shape=(150, 150, 3))
x = Conv2D(32, (3, 3), activation="relu")(X_in)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(64, (3, 3), activation="relu")(x)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(128, (3, 3), activation="relu")(x)
x = MaxPooling2D((2, 2))(x)
x = Flatten()(x)
x = Dropout(dropout_rate)(x)
x = Dense(128, activation="relu")(x)
# Output layer: a single sigmoid unit for binary classification
predictions = Dense(1, activation="sigmoid")(x)
# Create the model
model = Model(inputs=X_in, outputs=predictions)
# Compile the model
optimizer = Adam(learning_rate=1e-4)
model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=["acc"])
# ### Data Preprocessing
train_dir = os.path.join(base_dir, "train")
validation_dir = os.path.join(base_dir, "validation")
train_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(150, 150), # Resize images to 150 X 150
batch_size=20,
class_mode="binary",
)
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150), # Resize images to 150 X 150
batch_size=20,
class_mode="binary",
)
for (
data_batch,
labels_batch,
) in train_generator:
print("Data Batch shape:", data_batch.shape)
print("Labels Batch shape:", labels_batch.shape)
break
# ### Fit Model
model.summary()
history = model.fit(
train_generator,
steps_per_epoch=120, # = num_train_images/batch size(2400/20)
epochs=50,
validation_data=validation_generator,
validation_steps=40, # = num_valid_images/batch_size
)
model.save("cat_and_dogs.h5")
# ### Train Vs Validation Accuracy/Loss
plot_results(history)
best_epoch = get_best_epoch(history)
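# The test split loaded at the top of this section is never scored above; below is a minimal sketch
# of how the trained model could be evaluated on it (the test directory mirrors the train/validation
# layout, as seen in the sanity checks earlier).
test_dir = os.path.join(base_dir, "test")
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode="binary",
    shuffle=False,
)
test_loss, test_acc = model.evaluate(test_generator, steps=len(test_generator))
print("Test Loss {:0.5f}, Test Accuracy {:0.5f}".format(test_loss, test_acc))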
|
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # **Generates K normal 2D distributions**
import numpy as np
from matplotlib import pyplot as plt
# np.random.seed(999)
K = 12 # number of clusters
N = 20 # number of elements per cluster
L = 15
C = np.random.randint(-L, L, size=[K, 2]) # clusters centers
S = 0.5 * np.ones([K, 2]) # standard deviation per cluster
X = np.random.normal(loc=C[0, :], scale=S[0, :], size=[N, 2])
for k in range(1, K):
X = np.append(X, np.random.normal(loc=C[k, :], scale=S[k, :], size=[N, 2]), axis=0)
# # **Standard K-means Initialization (random)**
ax = plt.axes()
ax.scatter(X[:, 0], X[:, 1], marker=".", c="cyan")
ax.grid(visible=True)
# randomly selects K vectors as cluster centers
C = np.random.permutation(X.shape[0])[:K]
ax.scatter(X[C, 0], X[C, 1], c="r", marker="+")
# # **K-means++ Initialization**
from scipy.spatial.distance import cdist
C = np.zeros([K, 2])
D = np.zeros([N * K, 1])
# randomly selects vector as first cluster center
C[0, :] = X[np.random.permutation(X.shape[0])[0], :]
# distance between each vector and the first cluster center
D[:, 0] = np.transpose(cdist(C, X)[0, :])
# distance between the ith sample and the nearest cluster center
Dmin2 = [D[i, :].min() ** 2 for i in range(0, N * K)]
# empirical probability distribution
# the next cluster will be sampled from this distribution
G = Dmin2 / sum(Dmin2)
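# Completing the K-means++ initialization (a minimal sketch following the scheme above): each remaining
# center is sampled with probability proportional to the squared distance to the nearest center chosen so far.
for k in range(1, K):
    # distances from every sample to the centers selected so far
    D = cdist(C[:k, :], X)
    # squared distance from each sample to its nearest selected center
    Dmin2 = D.min(axis=0) ** 2
    G = Dmin2 / Dmin2.sum()
    # sample the next center index from the empirical distribution G
    next_center = np.random.choice(X.shape[0], p=G)
    C[k, :] = X[next_center, :]
ax = plt.axes()
ax.scatter(X[:, 0], X[:, 1], marker=".", c="cyan")
ax.scatter(C[:, 0], C[:, 1], c="r", marker="+")
ax.grid(visible=True)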
|
# <div style="color:black;
# display:fill;
# border-radius:5px;
# font-size:300%;
# font-family: Times New Roman, Times, serif;
# letter-spacing:0.5px"> Introduction
# Corn and maize are basic ingredients in many regions across the world. The ears can be roasted and eaten as a vegetable right from the cob, or the kernels can be extracted and used to make a range of meals, including cereals and flour. Maize is also a significant source of starch, which can be converted into oils and high-fructose corn syrup.
# All of this makes maize and cornmeal very important ingredients, so it is understandable that one would want to keep the crop free of diseases like Common Rust, Gray Leaf Spot, and Blight.
# These diseases are a major source of concern for maize and corn growers in Asia, Africa, and the Americas. Plant age, pathogen species, and environment all play a role in symptom expression. The diseases are more common in humid, warm climates.
# As a result, early detection of these diseases is critical in order to mitigate the harm.
# The main goal of this project is to use EfficientNet to classify these diseases.
# Hope you’ll enjoy 😃
import os
import json
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import Counter, defaultdict, deque
from urllib import request
from sklearn import preprocessing
from sklearn.preprocessing import OrdinalEncoder
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.layers import Dropout, Dense
from tensorflow.keras.applications import VGG16
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
# <div style="color:black;
# display:fill;
# border-radius:5px;
# font-size:300%;
# font-family: Times New Roman, Times, serif;
# letter-spacing:0.5px"> Train test validation split
#
# Here I used the split-folders library to split the data into train, test and validation sets with the given ratios.
IMSIZE = 260
from tensorflow.keras.applications.inception_v3 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
image_color = "rgb"
train_generator = ImageDataGenerator(
rescale=1.0 / 255,
shear_range=0.5,
rotation_range=30,
zoom_range=0.2,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
).flow_from_directory(
"../input/corn-dataset/splitted_folder/train",
color_mode=image_color,
target_size=(IMSIZE, IMSIZE),
batch_size=32,
class_mode="categorical",
)
validation_generator = ImageDataGenerator(rescale=1 / 255.0).flow_from_directory(
"../input/corn-dataset/splitted_folder/val",
color_mode=image_color,
target_size=(IMSIZE, IMSIZE),
batch_size=32,
class_mode="categorical",
)
test_generator = ImageDataGenerator(rescale=1 / 255.0).flow_from_directory(
"../input/corn-dataset/splitted_folder/test",
color_mode=image_color,
target_size=(IMSIZE, IMSIZE),
batch_size=32,
class_mode="categorical",
)
# <div style="color:black;
# display:fill;
# border-radius:5px;
# font-size:300%;
# font-family: Times New Roman, Times, serif;
# letter-spacing:0.5px"> Data exploration
#
# The class counts show that Gray Leaf Spot has fewer images than the other classes, which can bias the results.
# I'll add class weights to address this imbalance (a short sketch follows; the actual computation appears just before training).
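# A minimal sketch of the weighting idea with made-up counts (illustrative only; the
# notebook's real computation from train_generator.classes appears just before model.fit):
toy_counts = {"Blight": 1000, "Common_Rust": 1100, "Gray_Leaf_Spot": 500, "Healthy": 1100}
toy_weights = {c: max(toy_counts.values()) / n for c, n in toy_counts.items()}
print(toy_weights)  # the rarest class gets the largest weight in the loss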
# <div style="color:black;
# display:fill;
# border-radius:5px;
# font-size:300%;
# font-family: Times New Roman, Times, serif;
# letter-spacing:0.5px"> Create Model
#
# I used EfficientNet as my pretrained model, unfreezing all layers since the original (ImageNet) weights were trained for a different task.
import efficientnet.tfkeras as enet
from keras.layers import (
Dense,
Flatten,
Input,
Activation,
Conv2D,
MaxPooling2D,
BatchNormalization,
Dropout,
AveragePooling2D,
)
from keras import Model
# input_size=[IMSIZE,IMSIZE,3]
# input_layer=Input(input_size)
# x=input_layer
#
inputs_1 = tf.keras.Input(shape=(260, 260, 3))
mymodel = enet.EfficientNetB2(
input_shape=(IMSIZE, IMSIZE, 3), include_top=False, weights="imagenet"
)
x = AveragePooling2D(pool_size=(7, 7))(mymodel.output)
x = Flatten()(x)
x = Dense(4, activation="softmax")(x)
output_layer = x
final_model = Model(mymodel.input, output_layer)
final_model.summary()
# from tensorflow.keras.models import Sequential
# from keras import Model
# from tensorflow.keras.layers import Dense,Flatten, Conv2D,Dropout,Input,GlobalAveragePooling2D, AveragePooling2D, Activation, MaxPooling2D, BatchNormalization,Concatenate
# input_size=[IMSIZE,IMSIZE,3]
# input_layer=Input(input_size)
# x=input_layer
# x=Conv2D(64,[7,7],padding = "same", activation = 'relu',kernel_initializer='he_uniform')(x)
# x=BatchNormalization()(x)
# x=Conv2D(64,[7,7],padding = "same", activation = 'relu',kernel_initializer='he_uniform')(x)
# x = AveragePooling2D(pool_size = [2,2])(x)
# x=Dropout(0.2)(x)
# x=Conv2D(128,[5,5],padding = "same", activation = 'relu',kernel_initializer='he_uniform')(x)
# x=BatchNormalization()(x)
# x=Conv2D(128,[5,5],padding = "same", activation = 'relu',kernel_initializer='he_uniform')(x)
# x = AveragePooling2D(pool_size = [2,2])(x)
# x=Dropout(0.2)(x)
# x=Conv2D(256,[4,4],padding = "same", activation = 'relu',kernel_initializer='he_uniform')(x)
# x=BatchNormalization()(x)
# x=Conv2D(256,[4,4],padding = "same", activation = 'relu',kernel_initializer='he_uniform')(x)
# x = AveragePooling2D(pool_size = [2,2])(x)
# x=Dropout(0.2)(x)
# x=Conv2D(512,[3,3],padding = "same", activation = 'relu',kernel_initializer='he_uniform')(x)
# x=BatchNormalization()(x)
# x=Conv2D(512,[3,3],padding = "same", activation = 'relu',kernel_initializer='he_uniform')(x)
# x = GlobalAveragePooling2D()(x)
# x=Dropout(0.2)(x)
# x=Flatten()(x)
# x=Dense(128,activation='relu')(x)
# x=Dropout(0.2)(x)
# x=Dense(32,activation='relu')(x)
# x=Dropout(0.2)(x)
# x=Dense(4,activation='softmax')(x)
# output_layer=x
# final_model=Model(input_layer,output_layer)
# final_model.summary()
def scheduler(epoch, lr):
if epoch < 3:
return lr
else:
return lr * tf.math.exp(-0.1)
opt = tf.keras.optimizers.Adam(0.0001)
final_model.compile(
optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"]
)
def callbacks(patience=2):
checkpoint = tf.keras.callbacks.ModelCheckpoint(
"my_model.h5",
monitor="val_loss",
verbose=1,
save_best_only=True,
save_weights_only=True,
)
early = tf.keras.callbacks.EarlyStopping(
monitor="val_loss", patience=patience, min_delta=0.001
)
lr = tf.keras.callbacks.LearningRateScheduler(scheduler)
callbacks_list = [checkpoint, early, lr]
return callbacks_list
callbacks = callbacks()
counter = Counter(train_generator.classes)
max_val = float(max(counter.values()))
class_weights1 = {
class_id: max_val / num_images for class_id, num_images in counter.items()
}
os.environ["TF_GPU_ALLOCATOR"] = "cuda_malloc_async"
hist = final_model.fit(
train_generator,
epochs=50,
validation_data=(validation_generator),
callbacks=callbacks,
class_weight=class_weights1,
)
final_model.load_weights("./my_model.h5")
loss, acc = final_model.evaluate(validation_generator, verbose=2)
print("Restored model, accuracy: {:5.2f}%".format(100 * acc))
from PIL import Image
picture = Image.open("../input/gray-spot/.jpg").resize([IMSIZE, IMSIZE])
dir(final_model)
picture = np.array(picture).reshape([1, 260, 260, 3])
final_model(picture)
from PIL import Image
picture = Image.open(
"../input/corn-dataset/splitted_folder/train/Common_Rust/Corn_Common_Rust (10).jpg"
).resize([IMSIZE, IMSIZE])
picture = np.array(picture).reshape([1, 260, 260, 3])
final_model(picture)
import os
from PIL import Image
list1 = os.listdir("../input/corn-dataset/splitted_folder/val")
data = []
target = []
size_now = 260
for i in range(len(list1)):
list2 = os.listdir("../input/corn-dataset/splitted_folder/val/" + list1[i])
N = len(list2)
target.append([i] * N)
arr = np.zeros(N * size_now * size_now * 3).reshape(N, size_now, size_now, 3)
for m in range(N):
photo = Image.open(
"../input/corn-dataset/splitted_folder/val/" + list1[i] + "/" + list2[m]
)
photo = photo.resize([size_now, size_now])
photo = np.array(photo)
photo = photo[:, :, 0:3] / 255
arr[m] = photo
data.append(arr)
for i in data:
print(i.shape)
list1
len(data)
# stack the per-class validation arrays in list1 order and build the matching labels
# (generator class indices, with the per-folder image counts hard-coded)
arr = np.vstack((data[0], data[1], data[2], data[3]))
tar = list("1" * 261) + list("0" * 229) + list("3" * 232) + list("2" * 114)
tar = [int(element) for element in tar]
len(tar)
y_pre = final_model.predict(arr)
y_pre.shape
dat = pd.DataFrame(y_pre)
dat.head(10)
com = pd.DataFrame()
com["pre"] = dat.apply(lambda element: np.argmax(element), axis=1)
com["true"] = tar
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
com.head(10)
def get_two_correct(per_s):
final_df = pd.DataFrame()
final_df["pre"] = dat.apply(lambda element: np.argmax(element), axis=1)
final_df["true"] = tar
large_index = np.where(dat.max(axis=1) >= per_s)
large1 = final_df.loc[large_index[0]]
print(sum(large1["pre"] == large1["true"]) / large1.shape[0])
min_index = np.where(dat.max(axis=1) < per_s)
min1 = final_df.loc[min_index[0]]
print(sum(min1["pre"] == min1["true"]) / min1.shape[0])
return (
sum(large1["pre"] == large1["true"]) / large1.shape[0],
sum(min1["pre"] == min1["true"]) / min1.shape[0],
)
upper = []
lower = []
for per in np.linspace(40, 99, 60) / 100:
    upper_now, lower_now = get_two_correct(per)
    upper.append(upper_now)
    lower.append(lower_now)
plt.plot(upper)
plt.plot(lower)
dat.max(axis=1).hist(bins=50)
get_two_correct(0.8)
predictions = np.argmax(final_model.predict(arr), axis=-1)
# class folders in list1 order: ['Common_Rust', 'Blight', 'Healthy', 'Gray_Leaf_Spot']
# their corresponding generator class indices: [1, 0, 3, 2]
from sklearn.metrics import classification_report
print(classification_report(tar, predictions))
import seaborn as sns
from sklearn.metrics import confusion_matrix
sns.set()
plt.figure(figsize=(20, 16), dpi=60)
C2 = confusion_matrix(tar, predictions)
sns.heatmap(C2, annot=True, cmap="Greens", fmt=".20g")
plt.savefig("heatmap.jpg")
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
y_true = tar
y_pred = predictions
# assign the true labels and the model predictions computed above
C = confusion_matrix(y_true, y_pred, labels=[0, 1, 2, 3])  # the labels can be replaced with your own class names, e.g. 'cat'
plt.matshow(C, cmap=plt.cm.Reds)  # change the colormap as needed
# plt.colorbar()
for i in range(len(C)):
for j in range(len(C)):
plt.annotate(
C[j, i], xy=(i, j), horizontalalignment="center", verticalalignment="center"
)
# plt.tick_params(labelsize=15)  # set the font size of the class tick labels (0, 1, 2, 3) on the left and top axes
plt.ylabel("True label")
plt.xlabel("Predicted label")
# plt.ylabel('True label', fontdict={'family': 'Times New Roman', 'size': 20})  # set the label font size
# plt.xlabel('Predicted label', fontdict={'family': 'Times New Roman', 'size': 20})
plt.show()
hist.history.keys()
plt.figure(figsize=(10, 6))
plt.plot(range(1, len(hist.history["loss"]) + 1, 1), hist.history["loss"])
plt.plot(range(1, len(hist.history["loss"]) + 1, 1), hist.history["val_loss"])
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend(["loss", "val_loss"])
plt.savefig("loss.jpg")
plt.show()
plt.figure(figsize=(10, 6))
plt.plot(range(1, len(hist.history["loss"]) + 1, 1), hist.history["accuracy"])
plt.plot(range(1, len(hist.history["loss"]) + 1, 1), hist.history["val_accuracy"])
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.legend(["accuracy", "val_accuracy"])
plt.savefig("accuracy.jpg")
plt.show()
from keras.utils.vis_utils import plot_model
plot_model(final_model, to_file="model_auth.png", show_shapes=True)
|
# # Importing Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import cv2
import warnings
warnings.filterwarnings("ignore")
from PIL import Image
import tensorflow as tf
from sklearn.model_selection import train_test_split
from skimage.transform import resize
from sklearn.metrics import accuracy_score
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
# # 1. Loading the dataset
from tqdm import tqdm
import os
import cv2
import numpy as np
data = []
labels = []
classes = 43
cur_path = "../input/gtsrb-german-traffic-sign/Train"
for i in tqdm(os.listdir(cur_path)):
dir = cur_path + "/" + i
for j in os.listdir(dir):
img_path = dir + "/" + j
img = cv2.imread(img_path, -1)
img = cv2.resize(img, (32, 32), interpolation=cv2.INTER_NEAREST)
data.append(img)
labels.append(i)
data = np.array(data)
labels = np.array(labels)
print(data.shape, labels.shape)
label_map = {
"0": "20_speed",
"1": "30_speed",
"2": "50_speed",
"3": "60_speed",
"4": "70_speed",
"5": "80_speed",
"6": "80_lifted",
"7": "100_speed",
"8": "120_speed",
"9": "no_overtaking_general",
"10": "no_overtaking_trucks",
"11": "right_of_way_crossing",
"12": "right_of_way_general",
"13": "give_way",
"14": "stop",
"15": "no_way_general",
"16": "no_way_trucks",
"17": "no_way_one_way",
"18": "attention_general",
"19": "attention_left_turn",
"20": "attention_right_turn",
"21": "attention_curvy",
"22": "attention_bumpers",
"23": "attention_slippery",
"24": "attention_bottleneck",
"25": "attention_construction",
"26": "attention_traffic_light",
"27": "attention_pedestrian",
"28": "attention_children",
"29": "attention_bikes",
"30": "attention_snowflake",
"31": "attention_deer",
"32": "lifted_general",
"33": "turn_right",
"34": "turn_left",
"35": "turn_straight",
"36": "turn_straight_right",
"37": "turn_straight_left",
"38": "turn_right_down",
"39": "turn_left_down",
"40": "turn_circle",
"41": "lifted_no_overtaking_general",
"42": "lifted_no_overtaking_trucks",
}
# # Sample images from training set
import random
path = "../input/gtsrb-german-traffic-sign/Train"
image_folders = random.sample(os.listdir(path), 5)
fig, ax = plt.subplots(nrows=1, ncols=5, figsize=(15, 5))
k = 0
for i in image_folders:
image_file = random.sample(os.listdir(os.path.join(path, i)), 1)
temp_path = os.path.join(path, i)
final_path = os.path.join(temp_path, image_file[0])
img = Image.open(final_path)
# Plot the image
ax[k].imshow(img)
ax[k].axis("off")
k = k + 1
plt.show()
# # 2. Visualizing the data
import os
import seaborn as sns
classes = sorted(os.listdir(cur_path))
# count the number of images in each class
num_images = [len(os.listdir(os.path.join(cur_path, cls))) for cls in classes]
# create a DataFrame to store the data
data_num = {"Classes": classes, "Number of images": num_images}
df = pd.DataFrame(data_num)
# create a bar plot using Seaborn
sns.set_style("whitegrid")
plt.figure(figsize=(12, 6))
sns.barplot(x="Classes", y="Number of images", data=df)
plt.xlabel("Classes")
plt.ylabel("Number of images")
plt.title("Distribution of images across classes")
plt.show()
# # 3. Model
# defining model structure
model = Sequential()
model.add(
Conv2D(filters=32, kernel_size=(5, 5), activation="relu", input_shape=(32, 32, 3))
)
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu"))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(rate=0.5))
model.add(Dense(43, activation="softmax"))
# model compilation
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
import visualkeras
visualkeras.graph_view(model)
X_train, X_test, y_train, y_test = train_test_split(
data, labels, test_size=0.25, random_state=21
)
print((X_train.shape, y_train.shape), (X_test.shape, y_test.shape))
# converting the labels into one hot encoding
y_train = to_categorical(y_train, 43)
y_test = to_categorical(y_test, 43)
# # Training the model
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
callbacks = [
EarlyStopping(monitor="val_loss", patience=20),
ModelCheckpoint(filepath="best_model.h5", monitor="val_loss", save_best_only=True),
]
history = model.fit(
X_train,
y_train,
batch_size=128,
epochs=50,
callbacks=callbacks,
validation_data=(X_test, y_test),
verbose=1,
)
import json
with open("history.json", "w") as f:
json.dump(history.history, f)
# # Plots
import seaborn as sns
# Set up the figure
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 5))
# Plot the accuracy
sns.lineplot(
data=history.history,
x=range(len(history.history["accuracy"])),
y="accuracy",
ax=ax1,
label="train accuracy",
palette=["blue"],
)
sns.lineplot(
data=history.history,
x=range(len(history.history["val_accuracy"])),
y="val_accuracy",
ax=ax1,
label="test accuracy",
palette=["orange"],
)
ax1.set_title("Accuracy")
ax1.set_xlabel("epochs")
ax1.set_ylabel("Accuracy")
# Plot the loss
sns.lineplot(
data=history.history,
x=range(len(history.history["loss"])),
y="loss",
ax=ax2,
label="train loss",
palette=["blue"],
)
sns.lineplot(
data=history.history,
x=range(len(history.history["val_loss"])),
y="val_loss",
ax=ax2,
label="test loss",
palette=["orange"],
)
ax2.set_title("Loss")
ax2.set_xlabel("epochs")
ax2.set_ylabel("Loss")
plt.tight_layout()
plt.show()
# # Model testing
y_test = pd.read_csv("../input/gtsrb-german-traffic-sign/Test.csv")
# # Sample images from test set
path = "../input/gtsrb-german-traffic-sign/Test"
image_files = random.sample(os.listdir(path), 5)
fig, ax = plt.subplots(nrows=1, ncols=5, figsize=(15, 5))
for i in range(len(image_files)):
final_path = os.path.join(path, image_files[i])
img = Image.open(final_path)
ax[i].imshow(img)
ax[i].axis("off")
plt.show()
labels_test = y_test["ClassId"].values
img_test = y_test["Path"].values
test_dir = "../input/gtsrb-german-traffic-sign"
data = []
for img in img_test:
img_path = os.path.join(test_dir, img)
# print(img_path)
image = cv2.imread(img_path, -1)
image = cv2.resize(image, (32, 32), interpolation=cv2.INTER_NEAREST)
data.append(np.array(image))
X_test = np.array(data)
# # Predictions of Test set
pred = np.argmax(model.predict(X_test), axis=1)
print("Test accuracy: ", accuracy_score(labels_test, pred) * 100)
# # Predicted Outputs
import numpy as np
from tensorflow.keras.preprocessing import image
t = y_test.sample(n=5)
img_test = t["Path"].values
test_dir = "../input/gtsrb-german-traffic-sign"
for i in range(5):
img_path = os.path.join(test_dir, img_test[i])
img1 = Image.open(img_path)
plt.imshow(np.array(img1))
plt.show()
img = image.load_img(img_path, target_size=(32, 32))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
# Make predictions on the single image
predictions = model.predict(img_array)
# Print the predicted class
predicted_class = np.argmax(predictions)
print("Predicted class:", label_map[str(predicted_class)])
# # Confusion Matrix
from sklearn.metrics import confusion_matrix
import seaborn as sns
fig = plt.figure(figsize=(15, 15))
y_true = np.array(y_test["ClassId"])
cm = confusion_matrix(y_true, pred)
sns.heatmap(cm, cmap="Blues")
from sklearn.metrics import (
precision_score,
recall_score,
confusion_matrix,
f1_score,
accuracy_score,
)
y_pred = pred
precision = precision_score(y_true, y_pred, average="macro")
recall = recall_score(y_true, y_pred, average="macro")
f1 = f1_score(y_true, y_pred, average="macro")
accuracy = accuracy_score(y_true, y_pred)
print("Precision:", precision)
print("Recall:", recall)
print("F1 Score:", f1)
print("Accuracy:", accuracy)
|
# # **IMAGE CLASSIFIER USING SEQUENTIAL API**
# # **Setting up the environment**
import numpy as np
import pandas as pd
import tensorflow as tf
data = tf.keras.datasets.fashion_mnist.load_data()
(X_train_full, y_train_full), (X_test, y_test) = data
X_train, y_train = X_train_full[:-5000], y_train_full[:-5000]
X_valid, y_valid = X_train_full[-5000:], y_train_full[-5000:]
X_train.shape
X_train.dtype
# # **Scaling the data**
X_train, X_valid, X_test = X_train / 255.0, X_valid / 255.0, X_test / 255.0
import matplotlib.pyplot as plt
plt.imshow(X_train[0], cmap="binary")
plt.axis("off")
plt.show()
class_names = [
"T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
]
class_names[y_train[0]]
n_rows = 4
n_cols = 10
plt.figure(figsize=(n_cols * 1.2, n_rows * 1.2))
for row in range(n_rows):
for col in range(n_cols):
index = n_cols * row + col
plt.subplot(n_rows, n_cols, index + 1)
plt.imshow(X_train[index], cmap="binary", interpolation="nearest")
plt.axis("off")
plt.title(class_names[y_train[index]])
plt.subplots_adjust(wspace=0.2, hspace=0.5)
plt.show()
# # **Creating the model**
tf.random.set_seed(42)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Input(shape=[28, 28]))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(300, activation="relu"))
model.add(tf.keras.layers.Dense(100, activation="relu"))
model.add(tf.keras.layers.Dense(10, activation="softmax"))
model.summary()
tf.keras.utils.plot_model(model, "my_fashion_mnist_model.png", show_shapes=True)
model.layers
hidden1 = model.layers[1]
weights, biases = hidden1.get_weights()
weights
biases
weights.shape
biases.shape
# # **Compiling the model**
model.compile(
loss="sparse_categorical_crossentropy", optimizer="sgd", metrics=["accuracy"]
)
# # **Training and evaluating the model**
history = model.fit(X_train, y_train, epochs=30, validation_data=(X_valid, y_valid))
history.params
import pandas as pd
pd.DataFrame(history.history).plot(
figsize=(8, 5),
xlim=[0, 29],
ylim=[0, 1],
grid=True,
xlabel="Epoch",
style=["r--", "r--.", "b-", "b-*"],
)
plt.legend(loc="lower left") # extra code
plt.show()
model.evaluate(X_test, y_test)
# # **Using the model to make predictions**
X_new = X_test[:5]
y_proba = model.predict(X_new)
y_proba.round(2)
y_pred = y_proba.argmax(axis=-1)
y_pred
np.array(class_names)[y_pred]
y_new = y_test[:5]
y_new
plt.figure(figsize=(7.2, 2.4))
for index, image in enumerate(X_new):
plt.subplot(1, 6, index + 1)
plt.imshow(image, cmap="binary", interpolation="nearest")
plt.axis("off")
plt.title(class_names[y_test[index]])
plt.subplots_adjust(wspace=0.2, hspace=0.5)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
heart_disease = pd.read_csv("/kaggle/input/heart-disease-uci/heart.csv")
heart_disease.head()
heart_disease.info()
heart_disease.isnull().sum()
heart_disease["target"]
heart_disease.sex[heart_disease.target == 1].value_counts()
import matplotlib.pyplot as plt
heart_disease.sex[heart_disease.target == 1].value_counts().plot(
kind="bar", figsize=(5, 5), color=["lightblue", "pink"]
)
plt.title("Heart disease for males(1) and females(0)")
plt.xlabel("Gender")
plt.ylabel("Number of heart patients")
# pd.crosstab computes the frequency tables of the factors in the table
pd.crosstab(heart_disease.target, heart_disease.sex).plot(
kind="barh", figsize=(10, 5), color=["lightblue", "pink"]
)
plt.title("Frequency of Heart Disease vs Sex")
plt.xlabel(" '0' = Heart Disease, '1' = No disease")
plt.ylabel("Number of people with heart disease")
plt.legend(["Male", "Female"])
plt.xticks(rotation=0)
# we want to visualize the correlations using a correlation heatmap
import seaborn as sns
correlation_map = heart_disease.corr()
fig, ax = plt.subplots(figsize=(20, 10))
sns.heatmap(correlation_map, annot=True, linewidths=0.5, fmt=".3f")
heart_disease.columns
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
features = [
"age",
"sex",
"cp",
"trestbps",
"chol",
"fbs",
"restecg",
"thalach",
"exang",
"oldpeak",
"slope",
"ca",
"thal",
"target",
]
heart_disease[features] = scaler.fit_transform(heart_disease[features])
heart_disease.head()
# Standardize the continuous features by removing the mean and scaling to unit variance
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
features = ["age", "trestbps", "chol", "thalach", "oldpeak"]
heart_disease[features] = scaler.fit_transform(heart_disease[features])
heart_disease.head()
X_data = heart_disease.drop("target", axis=1)
Y_data = heart_disease["target"]
Y_data.head()
X_data.head()
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(
X_data, Y_data, random_state=0, test_size=0.2
)
|
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print(f"pandas version = {pd.__version__}")
print(f"numpy version = {np.__version__}")
print(f"seaborn version = {sns.__version__}")
df = pd.read_csv(
"/kaggle/input/the-best-cities-for-a-workation/best cities for a workation.csv"
)
df.sample(10)
df.columns
df.info()
# all rows are non-null
cols = [
"Remote connection: Average WiFi speed (Mbps per second)",
"Co-working spaces: Number of co-working spaces",
"Caffeine: Average price of buying a coffee",
"Travel: Average price of taxi (per km)",
"After-work drinks: Average price for 2 beers in a bar",
"Accommodation: Average price of 1 bedroom apartment per month",
"Food: Average cost of a meal at a local, mid-level restaurant",
"Climate: Average number of sunshine hours",
"Tourist attractions: Number of ‘Things to do’ on Tripadvisor",
"Instagramability: Number of photos with #",
]
df[cols].hist(layout=(1, len(cols)), figsize=(3 * len(cols), 3.5))
from sklearn import preprocessing
cols = [
"Remote connection: Average WiFi speed (Mbps per second)",
"Co-working spaces: Number of co-working spaces",
"Caffeine: Average price of buying a coffee",
"Travel: Average price of taxi (per km)",
"After-work drinks: Average price for 2 beers in a bar",
"Accommodation: Average price of 1 bedroom apartment per month",
"Food: Average cost of a meal at a local, mid-level restaurant",
"Climate: Average number of sunshine hours",
"Tourist attractions: Number of ‘Things to do’ on Tripadvisor",
"Instagramability: Number of photos with #",
]
pt = preprocessing.PowerTransformer(method="yeo-johnson", standardize=True)
mat = pt.fit_transform(df[cols])
mat[:10].round(4)
X = pd.DataFrame(mat, columns=cols)
X
dX = pd.concat([df[["Ranking", "City", "Country"]], X], axis=1)
dX
X.hist(layout=(1, len(cols)), figsize=(3 * len(cols), 3.5))
from sklearn.cluster import KMeans
# elbow method for finding the best n_clusters
ssd = []
for k in range(1, 9):
m = KMeans(n_clusters=k)
m.fit(X)
ssd.append((k, m.inertia_))
ssd
dfX = pd.DataFrame(ssd, columns=["k", "ssd"])
dfX["pct_change"] = dfX["ssd"].pct_change() * 100
dfX
sns.lineplot(x=dfX["k"], y=dfX["ssd"], linestyle="--", marker="o")
for i, r in dfX.iterrows():
plt.text(r["k"], r["ssd"], f"{r['pct_change']:.2f}", fontsize=10)
plt.show()
model = KMeans(n_clusters=3)
model.fit(X)
model.transform(X)
model.labels_
df["cluster"] = model.labels_
dX["cluster"] = model.labels_
df
sns.pairplot(df, vars=cols, hue="cluster")
sns.countplot(data=df, x="cluster")
cols = [
"Remote connection: Average WiFi speed (Mbps per second)",
"Co-working spaces: Number of co-working spaces",
"Caffeine: Average price of buying a coffee",
"Travel: Average price of taxi (per km)",
"After-work drinks: Average price for 2 beers in a bar",
"Accommodation: Average price of 1 bedroom apartment per month",
"Food: Average cost of a meal at a local, mid-level restaurant",
"Climate: Average number of sunshine hours",
"Tourist attractions: Number of ‘Things to do’ on Tripadvisor",
"Instagramability: Number of photos with #",
]
fig, ax = plt.subplots(nrows=5, ncols=2, figsize=(10, 30))
ax = ax.ravel()
for i, col in enumerate(cols):
sns.boxenplot(data=df, x="cluster", y=col, ax=ax[i])
dX.groupby("cluster").median()
sns.heatmap(
dX.drop("Ranking", axis="columns").groupby("cluster").median(),
cmap="Reds",
linewidths=1,
)
print("cluster 0")
print(df.loc[(df.cluster == 0), ["City", "Country"]])
print("cluster 1")
print(df.loc[(df.cluster == 1), ["City", "Country"]])
print("cluster 2")
print(df.loc[(df.cluster == 2), ["City", "Country"]])
# df.loc[(df.cluster==0)]
### Model interpretation
# Cluster 0 -> Triple hot -> hot coffee, hot as a travel destination, and hot as hell
# Cluster 1 -> Top travel-destination countries with a high cost of living
# Cluster 2 -> Culture capitals -> some are not capitals but are as popular as capitals
|
# # Libraries
import pandas as pd
import numpy as np
# # Import Data
test = pd.read_csv("/kaggle/input/titanic/test.csv")
train = pd.read_csv(
"/kaggle/input/titanic/train.csv",
dtype={
"Name": "string",
"Sex": "string",
"Ticket": "string",
"Cabin": "string",
"Embarked": "string",
},
)
sub = pd.read_csv("/kaggle/input/titanic/gender_submission.csv")
# # Processing
cabin_class1_distribution = (
train[train["Pclass"] == 1]["Cabin"].value_counts()
/ train[train["Pclass"] == 1]["Cabin"].value_counts().sum()
)
cabin_class2_distribution = (
train[train["Pclass"] == 2]["Cabin"].value_counts()
/ train[train["Pclass"] == 2]["Cabin"].value_counts().sum()
)
cabin_class3_distribution = (
train[train["Pclass"] == 3]["Cabin"].value_counts()
/ train[train["Pclass"] == 3]["Cabin"].value_counts().sum()
)
test.loc[test["Fare"].isnull(), "Fare"] = test["Fare"].mean()
train["Embarked"] = train["Embarked"].fillna(train["Embarked"].value_counts().index[0])
dfs = [train, test]
for df in dfs:
df["Family Size"] = df["SibSp"] + df["Parch"]
df.loc[(df["Cabin"].isnull()) & (df["Pclass"] == 1), "Cabin"] = np.random.choice(
cabin_class1_distribution.index,
df.loc[(df["Cabin"].isnull()) & (df["Pclass"] == 1)].shape[0],
cabin_class1_distribution.values,
)
df.loc[(df["Cabin"].isnull()) & (df["Pclass"] == 2), "Cabin"] = np.random.choice(
cabin_class2_distribution.index,
df.loc[(df["Cabin"].isnull()) & (df["Pclass"] == 2)].shape[0],
cabin_class2_distribution.values,
)
df.loc[(df["Cabin"].isnull()) & (df["Pclass"] == 3), "Cabin"] = np.random.choice(
cabin_class3_distribution.index,
df.loc[(df["Cabin"].isnull()) & (df["Pclass"] == 3)].shape[0],
cabin_class3_distribution.values,
)
# replace test-set cabins that never appear in train, again passing the probabilities via p=
test.loc[
    (~test["Cabin"].isin(train["Cabin"])) & (test["Pclass"] == 1), "Cabin"
] = np.random.choice(
    cabin_class1_distribution.index,
    test.loc[(~test["Cabin"].isin(train["Cabin"])) & (test["Pclass"] == 1)].shape[0],
    p=cabin_class1_distribution.values,
)
test.loc[
    (~test["Cabin"].isin(train["Cabin"])) & (test["Pclass"] == 2), "Cabin"
] = np.random.choice(
    cabin_class2_distribution.index,
    test.loc[(~test["Cabin"].isin(train["Cabin"])) & (test["Pclass"] == 2)].shape[0],
    p=cabin_class2_distribution.values,
)
test.loc[
    (~test["Cabin"].isin(train["Cabin"])) & (test["Pclass"] == 3), "Cabin"
] = np.random.choice(
    cabin_class3_distribution.index,
    test.loc[(~test["Cabin"].isin(train["Cabin"])) & (test["Pclass"] == 3)].shape[0],
    p=cabin_class3_distribution.values,
)
# # Interpolating Age
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt
encode_sex = {j: i for i, j in enumerate(train["Sex"].unique())}
encode_embarked = {j: i for i, j in enumerate(train["Embarked"].unique())}
encode_cabin = {j: i for i, j in enumerate(train["Cabin"].unique())}
df = train.drop(["PassengerId", "Survived", "Name", "Ticket"], axis=1).dropna().copy()
df["Sex"] = df["Sex"].map(encode_sex)
df["Embarked"] = df["Embarked"].map(encode_embarked)
df["Cabin"] = df["Cabin"].map(encode_cabin)
def reg_test_train(df, target, split):
test = df.sample(int(np.round(df.shape[0] * split[1])))
x_test = test.drop(target, axis=1).copy()
y_test = test[[target]].copy()
train = df.loc[~df.index.isin(test.index)].copy()
x_train = train.drop(target, axis=1).copy()
y_train = train[[target]].copy()
return x_test, x_train, y_test, y_train
x_test, x_train, y_test, y_train = reg_test_train(df, "Age", [0.8, 0.2])
regr = RandomForestRegressor(max_depth=10)
regr.fit(x_train, y_train.values.ravel())
# ### Feature Importance
pd.DataFrame({"A": regr.feature_importances_, "B": regr.feature_names_in_}).sort_values(
by="A", ascending=True
).plot(kind="barh", x="B")
plt.title("Feature Importance Plot")
plt.ylabel("Feature")
plt.xlabel("Score")
plt.show()
# ### Results on Train
res = regr.predict(x_train)
pd.Series((np.array([i[0] for i in np.array(y_train)]) - res)).hist(bins=20)
plt.show()
# ### Results on Test
res = regr.predict(x_test)
pd.Series((np.array([i[0] for i in np.array(y_test)]) - res)).hist(bins=20)
plt.show()
# ### Fitting to Missing Values
dfs = [train, test]
for df in dfs:
df["Sex"] = df["Sex"].map(encode_sex)
df["Embarked"] = df["Embarked"].map(encode_embarked)
df["Cabin"] = df["Cabin"].map(encode_cabin)
# Filling in Age
try:
df.loc[df["Age"].isnull(), "Age"] = regr.predict(
df.loc[df["Age"].isnull()].drop(
["Age", "Survived", "PassengerId", "Name", "Ticket"], axis=1
)
)
    except KeyError:  # the test set has no "Survived" column to drop
df.loc[df["Age"].isnull(), "Age"] = regr.predict(
df.loc[df["Age"].isnull()].drop(
["Age", "PassengerId", "Name", "Ticket"], axis=1
)
)
# # Predicting Survival
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
df = train.drop(["PassengerId", "Name", "Ticket"], axis=1).dropna().copy()
# ### Train/Test Split
def reg_test_train(df, target, class1, class2):
size = 300
split = [0.5, 0.5]
train = pd.concat(
[
df.loc[df[target] == class1].sample(int(np.round(size * split[0]))),
df.loc[df[target] == class2].sample(int(np.round(size * split[1]))),
],
axis=0,
)
x_train = train.drop(target, axis=1).copy()
y_train = train[[target]].copy()
test = df.loc[~df.index.isin(train.index)].copy()
x_test = test.drop(target, axis=1).copy()
y_test = test[[target]].copy()
return x_test, x_train, y_test, y_train
x_test, x_train, y_test, y_train = reg_test_train(df, "Survived", 0, 1)
# ### Fitting Model
clf = RandomForestClassifier(max_depth=10)
clf.fit(x_train, y_train.values.ravel())
# ### Feature Importance
pd.DataFrame({"A": clf.feature_importances_, "B": clf.feature_names_in_}).sort_values(
by="A", ascending=True
).plot(kind="barh", x="B")
plt.title("Feature Importance Plot")
plt.ylabel("Feature")
plt.xlabel("Score")
plt.show()
# ### Residuals on Train
survived_key = {0: "No", 1: "Yes"}
res = clf.predict(x_train)
cm = confusion_matrix(y_train, res, labels=clf.classes_)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=survived_key.values())
disp.plot()
plt.show()
# ### Residuals on Validation
res = clf.predict(x_test)
cm = confusion_matrix(y_test, res, labels=clf.classes_)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=survived_key.values())
disp.plot()
plt.show()
# ### Predicting on Test
test["Survived"] = clf.predict(
test.drop(["PassengerId", "Name", "Ticket"], axis=1).dropna().copy()
)
test[["PassengerId", "Survived"]].to_csv("Sub.csv", index=False)
|
import copy
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from tqdm import tqdm
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, cross_validate, cross_val_score
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report, roc_auc_score
from sklearn.linear_model import LogisticRegression
# # Description of the data from the article
# * **Area**: It gave the number of pixels within the borders of a pumpkin seed
# * **Perimeter**: It gave the circumference in pixels of a pumpkin seed
# * **Major Axis Length**: It gave the large (major) axis distance of a pumpkin seed
# * **Minor Axis Length**: It gave the small (minor) axis distance of a pumpkin seed
# * **Eccentricity**: It gave the eccentricity of a pumpkin seed
# * **Convex Area**: It gave the number of pixels of the smallest convex hull around the region formed by the pumpkin seed
# * **Extent**: It returned the ratio of a pumpkin seed area to the bounding box pixels
# * **Equiv Diameter**: It was formed by multiplying the area of the pumpkin seed by four, dividing by the number pi, and taking the square root
# * **Compactness**: It proportioned the area of the pumpkin seed relative to the area of the circle with the same circumference
# * **Solidity**: It measured how convex the pumpkin seed region is (the ratio of the seed area to its convex hull area)
# * **Roundness**: It measured the ovality of pumpkin seeds without considering the distortion of the edges
# * **Aspect Ratio**: It gave the aspect ratio of the pumpkin seeds (a few of these word descriptions are written out as formulas in the sketch below)
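# My reading of a few of the word descriptions above as formulas (assumed forms, not
# taken from the article itself); numpy is already imported as np above.
def shape_descriptors(area, perimeter, convex_area, major_axis, minor_axis):
    equiv_diameter = np.sqrt(4.0 * area / np.pi)  # sqrt(4 * Area / pi)
    compactness = 4.0 * np.pi * area / perimeter**2  # area vs. a circle with the same perimeter
    solidity = area / convex_area  # seed area relative to its convex hull
    aspect_ratio = major_axis / minor_axis  # major over minor axis length
    return equiv_diameter, compactness, solidity, aspect_ratio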
# # Read, transform and split data
pumpkin = pd.read_excel(
"/kaggle/input/pumpkin-seeds-dataset/Pumpkin_Seeds_Dataset/Pumpkin_Seeds_Dataset.xlsx"
)
pumpkin.head()
pumpkin.info()
# The data has no missing values.
pumpkin["Class"].unique()
pumpkin["target"] = np.nan
pumpkin.loc[pumpkin["Class"] == "Çerçevelik", "target"] = 0
pumpkin.loc[pumpkin["Class"] == "Ürgüp Sivrisi", "target"] = 1
len(pumpkin)
pumpkin["target"].mean()
x_train, x_test, y_train, y_test = train_test_split(
pumpkin,
pumpkin["target"],
test_size=0.2,
random_state=42,
stratify=pumpkin["target"],
)
features = list(x_train.columns)[:-2]
# Let's look at the feature distributions and find outliers (only extreme ones, more than 3 IQR beyond the quartiles)
for col in features:
print(col)
plt.figure(figsize=(13, 8))
a = pumpkin[col][pumpkin["Class"] == "Çerçevelik"]
b = pumpkin[col][pumpkin["Class"] == "Ürgüp Sivrisi"]
bins = np.linspace(pumpkin[col].min(), pumpkin[col].max(), 50)
plt.hist(a, bins, alpha=0.5, label="Çerçevelik")
plt.hist(b, bins, alpha=0.5, label="Ürgüp Sivrisi")
plt.legend(loc="upper left")
plt.show()
sns.boxplot(x=pumpkin[col], y=pumpkin["Class"], whis=3)
# We assume that the outliers come from a different distribution than the main data. Let's check this on the graphs. The 3-IQR rule works well for all features except two: 'Eccentricity' and 'Solidity'. For 'Eccentricity' and 'Solidity', outlier values are those below 0.7 and 0.97, respectively. The function **del_extreme_outlier** drops these outliers.
# # Baseline
clf = LogisticRegression(random_state=12)
clf.fit(x_train[features], x_train["target"])
y_pred = clf.predict(x_test[features])
proba = clf.predict_proba(x_test[features])[:, 1]
y_pred_t = clf.predict(x_train[features])
proba_t = clf.predict_proba(x_train[features])[:, 1]
print(
"Train: accuracy_score = ",
accuracy_score(y_pred_t, y_train),
"roc_auc_score = ",
roc_auc_score(y_train, proba_t),
)
print(
"Test: accuracy_score = ",
accuracy_score(y_pred, y_test),
"roc_auc_score = ",
roc_auc_score(y_test, proba),
)
# # Define some useful functions
def bins_generation(df, feature, bins_num=10):
df = df[[feature, "target"]]
df[f"bin_{feature}"], bins = pd.qcut(
df[feature], bins_num, duplicates="drop", retbins=True
)
bins[0] = -np.inf
bins[-1] = np.inf
df[f"bin_{feature}"] = pd.cut(df[feature], bins)
df[f"bin_{feature}"] = df[f"bin_{feature}"].apply(lambda x: x.left)
df[f"bin_{feature}"] = df[f"bin_{feature}"].astype(float)
return df, bins
def check_bins_generation(df, feature, bins_num=5):
for i in range(bins_num, 1, -1):
df_binned, bins = bins_generation(df, feature, i)
xg = pd.DataFrame(
df_binned.groupby(f"bin_{feature}").agg({feature: "count", "target": "sum"})
).reset_index()
if (
xg[feature].min() > len(df) * 0.2
and not (xg[feature] == xg["target"]).any()
):
break
return df_binned, bins
def woe_calc(df, feature, target):
df = pd.concat([df[feature], target], axis=1)
df = pd.concat(
[
df.groupby(df.columns.values[0], as_index=False)[
df.columns.values[1]
].count(),
df.groupby(df.columns.values[0], as_index=False)[
df.columns.values[1]
].mean(),
],
axis=1,
)
df = df.iloc[:, [0, 1, 3]]
df.columns = [df.columns.values[0], "count", "tr"]
df["n_good"] = df["tr"] * df["count"]
df["n_bad"] = (1 - df["tr"]) * df["count"]
df["good_rate"] = df["n_good"] / df["n_good"].sum()
df["bad_rate"] = df["n_bad"] / df["n_bad"].sum()
df["WoE"] = np.log(df["good_rate"] / df["bad_rate"])
return df
def del_extreme_outlier(pumpkin):
cols = pumpkin.columns
cols = cols.drop(["Class", "Eccentricity", "Solidity", "target"])
pumpkin_C = pumpkin[pumpkin["Class"] == "Çerçevelik"]
pumpkin_U = pumpkin[pumpkin["Class"] == "Ürgüp Sivrisi"]
for col in cols: # only large outliers over 3 IQR
Q1, Q3 = np.percentile(pumpkin_C[col], [25, 75])
IQR = Q3 - Q1
top_C = Q3 + 3 * IQR
bottom_C = Q1 - 3 * IQR
Q1, Q3 = np.percentile(pumpkin_U[col], [25, 75])
IQR = Q3 - Q1
top_U = Q3 + 3 * IQR
bottom_U = Q1 - 3 * IQR
pumpkin = pumpkin[
(
(pumpkin["Class"] == "Çerçevelik")
& (pumpkin[col] > bottom_C)
& (pumpkin[col] < top_C)
)
| (
(pumpkin["Class"] == "Ürgüp Sivrisi")
& (pumpkin[col] > bottom_U)
& (pumpkin[col] < top_U)
)
]
pumpkin = pumpkin[pumpkin["Eccentricity"] > 0.7]
pumpkin = pumpkin[pumpkin["Solidity"] > 0.97]
return pumpkin
def plot_by_woe(woe):
x = np.array(woe.iloc[:, 0].apply(str))
y = woe["WoE"]
plt.figure(figsize=(15, 10))
plt.plot(x, y, marker="o", color="violet", markersize=20, markerfacecolor="gray")
plt.xlabel(woe.columns[0])
plt.ylabel("WoE")
plt.title(str("WoE of " + woe.columns[0]))
plt.xticks(rotation=45)
plt.show()
def forward():
ginis = {}
for feat in list(set(woe_feat_names) - set(forward_feature_list)):
logreg = LogisticRegression(random_state=0)
scores = cross_val_score(
logreg,
x_train[forward_feature_list + [feat]].astype(float),
x_train["target"],
scoring="roc_auc",
cv=3,
)
ginis[feat] = scores.mean()
new_feat = max(ginis, key=ginis.get)
gini_list.append(max(ginis.values()))
if (gini_list[-1] - gini_list[-2]) < 0.0005:
print("END")
else:
forward_feature_list.append(new_feat)
print("Add new feature: ", new_feat)
print("roc_auc_score: ", gini_list[-1])
print("-------------------------------")
def recalc_forward():
logreg = LogisticRegression(random_state=0)
scores = cross_val_score(
logreg,
x_train[forward_feature_list],
x_train["target"],
scoring="roc_auc",
cv=3,
)
gini_list.append(scores.mean())
print("roc_auc_score: ", gini_list[-1])
print("-------------------------------")
def aWoE_num(df, feat):
df[feat] = pd.cut(df[feat[:-4]], bins[feat])
df[feat] = df[feat].apply(lambda x: x.left).astype(float)
df[feat + "_woe"] = df[feat].apply(lambda x: woe[woe[feat] == x]["WoE"].values[0])
return df
def add_WoE_num(x_test, feat):
x_test = aWoE_num(x_test, feat)
print(x_test[["target", feat]].groupby(feat).agg(["count", "mean"]))
def cWoE_num(df, feat):
df[feat + "_woe"] = df[feat].apply(lambda x: woe[woe[feat] == x]["WoE"].values[0])
return df
def change_WoE_num(x_test, x_train, feat):
x_train = cWoE_num(x_train, feat)
x_test = cWoE_num(x_test, feat)
print(x_test[["target", feat]].groupby(feat).agg(["count", "mean"]))
def merge_bins(train, test, feature, from_, to_):
train.loc[train[train[feature].isin(from_)].index, feature] = to_
test.loc[test[test[feature].isin(from_)].index, feature] = to_
return train, test
# # Features pretreatment
# The main idea is to use WoE (weight of evidence) instead of the raw feature values. To do this we split each feature into bins (each bin should hold a sizeable share of the data; the binning helper above requires more than 20% of the rows per bin) and replace the values in each bin with that bin's WoE. A toy illustration follows.
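# A toy illustration of the WoE replacement with made-up numbers (it mirrors what the
# woe_calc helper defined above computes): per bin, WoE = ln(good_rate / bad_rate),
# where the rates are the bin's share of all target=1 ("good") and target=0 ("bad") rows.
toy = pd.DataFrame(
    {
        "feat_bin": [0, 0, 0, 1, 1, 1, 1, 2, 2, 2],
        "target": [0, 0, 1, 0, 1, 1, 1, 1, 1, 0],
    }
)
g = toy.groupby("feat_bin")["target"].agg(["count", "mean"])
g["n_good"] = g["mean"] * g["count"]
g["n_bad"] = (1 - g["mean"]) * g["count"]
g["WoE"] = np.log((g["n_good"] / g["n_good"].sum()) / (g["n_bad"] / g["n_bad"].sum()))
print(g)  # in the real pipeline, every row of a bin is replaced by that bin's WoE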
x_train = del_extreme_outlier(x_train)
y_train = x_train["target"]
bin_feat_names = []
bins = {}
for feat in tqdm(features):
a, bin_ = check_bins_generation(x_train, feat, 5)
x_train[f"{feat}_bin"] = a[f"bin_{feat}"]
bin_feat_names.append(f"{feat}_bin")
bins[f"{feat}_bin"] = bin_
woe_feat_names = []
for feat in tqdm(bin_feat_names):
woe = woe_calc(x_train, feat, x_train["target"])
x_train[f"{feat}_woe"] = x_train[feat].apply(
lambda x: woe[woe[feat] == x]["WoE"].values[0]
)
woe_feat_names.append(f"{feat}_woe")
# # Advanced baseline
# The main idea is to use the WoE of the feature bins instead of the raw feature values.
x1_test = copy.deepcopy(x_test)
for feat in features:
feat = feat + "_bin"
woe = woe_calc(x_train, feat, x_train["target"])
add_WoE_num(x1_test, feat)
clf = LogisticRegression(random_state=12)
clf.fit(x_train[woe_feat_names], x_train["target"])
y_pred_t = clf.predict(x_train[woe_feat_names])
proba_t = clf.predict_proba(x_train[woe_feat_names])[:, 1]
y_pred = clf.predict(x1_test[woe_feat_names])
proba = clf.predict_proba(x1_test[woe_feat_names])[:, 1]
print(
"Train: accuracy_score = ",
accuracy_score(y_pred_t, y_train),
"roc_auc_score = ",
roc_auc_score(y_train, proba_t),
)
print(
"Test: accuracy_score = ",
accuracy_score(y_pred, y_test),
"roc_auc_score = ",
roc_auc_score(y_test, proba),
)
# * Baseline model:
# Train roc_auc_score = 0.938,
# Test roc_auc_score = 0.929
# * Advanced baseline model:
# Train roc_auc_score = 0.943,
# Test roc_auc_score = 0.937
# As these numbers show, the quality of the model improves. The next step is to use forward selection to find the most useful features and to merge bins to reduce overfitting.
# # Forward selection
forward_feature_list = []
gini_list = [0]
forward()
woe = woe_calc(x_train, "Compactness_bin", x_train["target"])
plot_by_woe(woe)
add_WoE_num(x_test, "Compactness_bin")
forward()
woe = woe_calc(x_train, "Solidity_bin", x_train["target"])
plot_by_woe(woe)
add_WoE_num(x_test, "Solidity_bin")
x_train, x_test = merge_bins(x_train, x_test, "Solidity_bin", [0.988], -np.inf)
change_WoE_num(x_test, x_train, "Solidity_bin")
recalc_forward()
forward()
woe = woe_calc(x_train, "Roundness_bin", x_train["target"])
plot_by_woe(woe)
add_WoE_num(x_test, "Roundness_bin")
forward()
woe = woe_calc(x_train, "Extent_bin", x_train["target"])
plot_by_woe(woe)
add_WoE_num(x_test, "Extent_bin")
x_train, x_test = merge_bins(x_train, x_test, "Extent_bin", [0.661], 0.714)
change_WoE_num(x_test, x_train, "Extent_bin")
recalc_forward()
forward()
woe = woe_calc(x_train, "Major_Axis_Length_bin", x_train["target"])
plot_by_woe(woe)
add_WoE_num(x_test, "Major_Axis_Length_bin")
forward()
woe = woe_calc(x_train, "Minor_Axis_Length_bin", x_train["target"])
plot_by_woe(woe)
add_WoE_num(x_test, "Minor_Axis_Length_bin")
x_train, x_test = merge_bins(
x_train, x_test, "Minor_Axis_Length_bin", [211.173], 224.514
)
change_WoE_num(x_test, x_train, "Minor_Axis_Length_bin")
recalc_forward()
forward()
clf = LogisticRegression(random_state=12)
clf.fit(x_train[forward_feature_list], x_train["target"])
y_pred_test = clf.predict(x_test[forward_feature_list])
proba_test = clf.predict_proba(x_test[forward_feature_list])[:, 1]
y_pred_train = clf.predict(x_train[forward_feature_list])
proba_train = clf.predict_proba(x_train[forward_feature_list])[:, 1]
print(
"Train: accuracy_score = ",
accuracy_score(y_pred_train, x_train["target"]),
"roc_auc_score = ",
roc_auc_score(x_train["target"], proba_train),
)
print(
"Test: accuracy_score = ",
accuracy_score(y_pred_test, x_test["target"]),
"roc_auc_score = ",
roc_auc_score(x_test["target"], proba_test),
)
|
# ### **[TensorFlow Hub](https://tfhub.dev/)** is a repository of pre-trained TensorFlow models.
# standard libraries
import numpy as np
import time
import PIL.Image as Image
import matplotlib.pylab as plt
import matplotlib.image as mpimg
import datetime
from tqdm.keras import TqdmCallback
from skimage import transform
# tensorflow libraries
import tensorflow as tf
import tensorflow_hub as hub
# path variables
train_path = "/kaggle/input/broken-eggs/train"
test_path = "/kaggle/input/broken-eggs/test"
# define image shape & batch size
batch_size = 32
img_height = 299
img_width = 299
# load training images
train_ds = tf.keras.utils.image_dataset_from_directory(
train_path,
validation_split=0.1,
subset="training",
seed=0,
image_size=(img_height, img_width),
batch_size=batch_size,
)
# load test/validation images
val_ds = tf.keras.utils.image_dataset_from_directory(
test_path,
validation_split=0.2,
subset="validation",
seed=0,
image_size=(img_height, img_width),
batch_size=batch_size,
)
# target class names
class_names = train_ds.class_names
print("the target classes are: ", *class_names, sep=" ,")
# rescaling the images for the model
"""TensorFlow Hub's convention for image models is to expect float inputs in the [0, 1] range"""
normalization_layer = tf.keras.layers.Rescaling(1.0 / 255)
train_ds = train_ds.map(
lambda x, y: (normalization_layer(x), y)
) # Where x—images, y—labels.
val_ds = val_ds.map(
lambda x, y: (normalization_layer(x), y)
) # Where x—images, y—labels.
"""finish the input pipeline by using buffered prefetching with Dataset.prefetch, so you can yield the data from disk without I/O blocking issues."""
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# get the headless model
"""TensorFlow Hub also distributes models without the top classification layer. These can be used to easily perform transfer learning."""
# feature vector model
inception_resnet_v2_fv = (
"https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/5"
)
feature_extractor_model = inception_resnet_v2_fv
# feature extraction layer
"""Create the feature extractor by wrapping the pre-trained model as a Keras layer with hub.KerasLayer. Use the trainable=False argument to freeze the variables, so that the training only modifies the new classifier layer"""
feature_extractor_layer = hub.KerasLayer(
feature_extractor_model, input_shape=(img_width, img_height, 3), trainable=False
)
# feature_batch = feature_extractor_layer(image_batch)
# add a classification layer
num_classes = len(class_names)
model = tf.keras.Sequential(
[feature_extractor_layer, tf.keras.layers.Dense(num_classes)]
)
# model summary
model.summary()
# compile the model
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["acc"],
)
# early stopping
early_stopping = tf.keras.callbacks.EarlyStopping(monitor="loss", patience=3)
# Define Epochs
NUM_EPOCHS = 20
# train the model
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=NUM_EPOCHS,
callbacks=[early_stopping, TqdmCallback(verbose=0)],
verbose=0,
)
# view model accuracy
model_acc = "{:.2%}".format(history.history["acc"][-1])
print(f"\n Model Accuracy Reached: {model_acc}")
# summarize history for accuracy
plt.subplot(1, 2, 1)
plt.plot(history.history["acc"])
plt.plot(history.history["val_acc"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# summarize history for loss
plt.subplot(1, 2, 2)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# prediction on an image from test-folder > crack
test_img_path = "/kaggle/input/broken-eggs/test/crack/2023-04-09 21_11_04.025236.jpg"
test_image = Image.open(test_img_path)
test_image = np.array(test_image).astype("float32") / 255
test_image = transform.resize(test_image, (img_width, img_height, 3))
test_image = np.expand_dims(test_image, axis=0)
# make predictions
prediction = model.predict(test_image)
pred_class = prediction.argmax()
print(f"The Predicted Class: {class_names[pred_class]}")
# view the test-image
plt.figure(figsize=(8, 8))
test_img = mpimg.imread(test_img_path)
plt.imshow(test_img)
plt.title("predicted class: " + class_names[pred_class])
plt.axis("off")
plt.show()
|
import os
from distutils.dir_util import copy_tree
import shutil
destination = "/kaggle/working/"
for directory in os.listdir("/kaggle/input/"):
source = f"/kaggle/input/{directory}/"
for file in os.listdir(source):
if len(file) > 2:
if file[:1].isdigit():
shutil.copy(source + file, destination)
# import os
# from distutils.dir_util import copy_tree
# destination = '/kaggle/working/'
# for directory in os.listdir("/kaggle/input/"):
# source = f'/kaggle/input/{directory}'
# copy_tree(source, destination)
# #os.remove('temp.csv')
# source = '/kaggle/input/fork-of-process-data300-2'
# destination = '/kaggle/working/'
# copy_tree(source, destination)
# #import os
# #os.remove('temp.csv')
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import cv2
import warnings
warnings.filterwarnings("ignore")
from PIL import Image
import tensorflow as tf
from sklearn.model_selection import train_test_split
from skimage.transform import resize
from sklearn.metrics import accuracy_score
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
# # 1. Loading the dataset
# loading dataset
data = []
labels = []
classes = 43
path = "../input/gtsrb-german-traffic-sign/Train"
for i in os.listdir(path):
dir = path + "/" + i
for j in os.listdir(dir):
img_path = dir + "/" + j
img = cv2.imread(img_path, -1)
img = cv2.resize(img, (30, 30), interpolation=cv2.INTER_NEAREST)
data.append(img)
labels.append(i)
data = np.array(data)
labels = np.array(labels)
print(data.shape, labels.shape)
i_path = "../input/gtsrb-german-traffic-sign/Train/1/00001_00072_00027.png"
plt.imshow(cv2.imread(i_path, -1))
X_train, X_test, y_train, y_test = train_test_split(
data, labels, test_size=0.25, random_state=10
)
print((X_train.shape, y_train.shape), (X_test.shape, y_test.shape))
# converting the labels into one hot encoding
y_train = to_categorical(y_train, 43)
y_test = to_categorical(y_test, 43)
# # 2. Visualizing the data
# number of images in each class
dic = {}
for folder in os.listdir(path):
dic[folder] = len(os.listdir(path + "/" + folder))
data_df = pd.Series(dic)
plt.figure(figsize=(15, 6))
data_df.sort_values().plot(kind="bar")
plt.xlabel("Classes")
plt.ylabel("Number of images")
# # 3. Model
# defining model structure
model = Sequential()
model.add(
Conv2D(filters=32, kernel_size=(5, 5), activation="relu", input_shape=(30, 30, 3))
)
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu"))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(rate=0.5))
model.add(Dense(43, activation="softmax"))
# model compilation
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
import visualkeras
visualkeras.layered_view(model)
epochs = 15
history = model.fit(
X_train, y_train, batch_size=64, epochs=epochs, validation_data=(X_test, y_test)
)
# # 4. Plots
plt.figure(0)
plt.plot(history.history["accuracy"], label="train accuracy")
plt.plot(history.history["val_accuracy"], label="test accuracy")
plt.title("Accuracy")
plt.xlabel("epochs")
plt.ylabel("Accuracy")
plt.legend()
plt.figure(0)
plt.plot(history.history["loss"], label="train loss")
plt.plot(history.history["val_loss"], label="test loss")
plt.title("Loss")
plt.xlabel("epochs")
plt.ylabel("Loss")
plt.legend()
# # 5. Model testing on test dataset
y_test = pd.read_csv("../input/gtsrb-german-traffic-sign/Test.csv")
plt.imshow(cv2.imread("../input/gtsrb-german-traffic-sign/Test/00000.png"))
labels_test = y_test["ClassId"].values
img_test = y_test["Path"].values
test_dir = "../input/gtsrb-german-traffic-sign"
data = []
for img in img_test:
img_path = os.path.join(test_dir, img)
# print(img_path)
image = cv2.imread(img_path, -1)
image = cv2.resize(image, (30, 30), interpolation=cv2.INTER_NEAREST)
data.append(np.array(image))
X_test = np.array(data)
pred = np.argmax(model.predict(X_test), axis=1)  # predict_classes was removed in newer Keras
print("Test accuracy: ", accuracy_score(labels_test, pred) * 100)
from tensorflow import keras
model.save("model2.h5")
# model = keras.models.load_model('path/to/location')
# # Model has 96% accuracy on the test data
from tensorflow import keras
model = keras.models.load_model("model2.h5")
label_map = {
"0": "20_speed",
"1": "30_speed",
"2": "50_speed",
"3": "60_speed",
"4": "70_speed",
"5": "80_speed",
"6": "80_lifted",
"7": "100_speed",
"8": "120_speed",
"9": "no_overtaking_general",
"10": "no_overtaking_trucks",
"11": "right_of_way_crossing",
"12": "right_of_way_general",
"13": "give_way",
"14": "stop",
"15": "no_way_general",
"16": "no_way_trucks",
"17": "no_way_one_way",
"18": "attention_general",
"19": "attention_left_turn",
"20": "attention_right_turn",
"21": "attention_curvy",
"22": "attention_bumpers",
"23": "attention_slippery",
"24": "attention_bottleneck",
"25": "attention_construction",
"26": "attention_traffic_light",
"27": "attention_pedestrian",
"28": "attention_children",
"29": "attention_bikes",
"30": "attention_snowflake",
"31": "attention_deer",
"32": "lifted_general",
"33": "turn_right",
"34": "turn_left",
"35": "turn_straight",
"36": "turn_straight_right",
"37": "turn_straight_left",
"38": "turn_right_down",
"39": "turn_left_down",
"40": "turn_circle",
"41": "lifted_no_overtaking_general",
"42": "lifted_no_overtaking_trucks",
}
import numpy as np
from tensorflow.keras.preprocessing import image
t = y_test.sample()
img_test = t["Path"].values
test_dir = "../input/gtsrb-german-traffic-sign"
img_path = os.path.join(test_dir, img_test[0])
img1 = Image.open(img_path)
plt.imshow(np.array(img1))
plt.show()
img = image.load_img(img_path, target_size=(30, 30))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
# Make predictions on the single image
predictions = model.predict(img_array)
# Print the predicted class
predicted_class = np.argmax(predictions)
print("Predicted class:", label_map[str(predicted_class)])
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import stats
def grad_step(w, X, y):
return X.T @ (sigmoid(X @ w) - y) / len(y)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def bce_loss(y_true, y_pred):
return -np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))
skyfall = pd.read_csv("weatherAUS_train.csv", delimiter=",")
Test = pd.read_csv("weatherAUS_test.csv", delimiter=",")
skyfall
y = skyfall["RainTomorrow"]
skyfall = skyfall.drop(["RainTomorrow"], axis=1)
skyfall["Date"] = pd.to_datetime(skyfall["Date"])
skyfall["Year"] = skyfall["Date"].dt.year
skyfall["Id"] = pd.Series(np.arange(101822))
skyfall["Month"] = skyfall["Date"].dt.month
skyfall["Day"] = skyfall["Date"].dt.day
skyfall.drop("Date", axis=1, inplace=True)
Test["Date"] = pd.to_datetime(Test["Date"])
Test["Year"] = Test["Date"].dt.year
Test["Month"] = Test["Date"].dt.month
Test["Day"] = Test["Date"].dt.day
Test.drop("Date", axis=1, inplace=True)
categorical = [var for var in skyfall.columns if skyfall[var].dtype == "O"]
numerical = [var for var in Test.columns if Test[var].dtype != "O"]
for df2 in [skyfall, Test]:
df2["WindGustDir"].fillna(skyfall["WindGustDir"].mode()[0], inplace=True)
df2["WindDir9am"].fillna(skyfall["WindDir9am"].mode()[0], inplace=True)
df2["WindDir3pm"].fillna(skyfall["WindDir3pm"].mode()[0], inplace=True)
df2["RainToday"].fillna(skyfall["RainToday"].mode()[0], inplace=True)
skyfall
towns = skyfall["Location"].unique()
for df1 in [skyfall, Test]:
l = len(df1)
loc = np.array(df1["Location"])
mon = np.array(df1["Month"])
for town in towns:
ind_t = np.where(loc == town)[0]
for month in range(1, 13):
ind_tm = [i for i in ind_t if mon[i] == month]
for col in categorical:
a = np.array(df1[col])
ind_tm_1 = [i for i in ind_tm if a[i] == a[i]]
ind_tm_0 = [i for i in ind_tm if a[i] != a[i]]
if len(ind_tm_1) > 0:
col_tm_mode = stats.mode(a[ind_tm_1])[0]
else:
col_tm_mode = stats.mode(np.array([x for x in a if x == x]))[0]
df1.loc[df1.index[ind_tm_0], col] = col_tm_mode
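# Same idea for numerical columns: fill missing values with the (town, month) group mean,
# falling back to the overall column mean when the group has no observed values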
for df1 in [skyfall, Test]:
l = len(df1)
loc = np.array(df1["Location"])
mon = np.array(df1["Month"])
for town in towns:
ind_t = np.where(loc == town)[0]
for month in range(1, 13):
ind_tm = [i for i in ind_t if mon[i] == month]
for col in numerical:
a = np.array(df1[col])
ind_tm_1 = [i for i in ind_tm if a[i] == a[i]]
ind_tm_0 = [i for i in ind_tm if a[i] != a[i]]
if len(ind_tm_1) > 0:
col_tm_mean = a[ind_tm_1].mean()
else:
col_tm_mean = np.mean(np.array([x for x in a if x == x]))
df1.loc[df1.index[ind_tm_0], col] = col_tm_mean
skyfall
skyfall["Evaporation"].fillna(value=skyfall["Evaporation"].mean(), inplace=True)
skyfall["MinTemp"].fillna(value=skyfall["MinTemp"].mean(), inplace=True)
skyfall["MaxTemp"].fillna(value=skyfall["MaxTemp"].mean(), inplace=True)
skyfall["Rainfall"].fillna(value=skyfall["Rainfall"].mean(), inplace=True)
skyfall["Sunshine"].fillna(value=skyfall["Sunshine"].mean(), inplace=True)
skyfall["WindGustSpeed"].fillna(value=skyfall["WindGustSpeed"].mean(), inplace=True)
skyfall["WindSpeed3pm"].fillna(value=skyfall["WindSpeed3pm"].mean(), inplace=True)
skyfall["WindSpeed9am"].fillna(value=skyfall["WindSpeed9am"].mean(), inplace=True)
skyfall["Cloud9am"].fillna(value=skyfall["Cloud9am"].mean(), inplace=True)
skyfall["Cloud3pm"].fillna(value=skyfall["Cloud3pm"].mean(), inplace=True)
skyfall["Temp9am"].fillna(value=skyfall["Temp9am"].mean(), inplace=True)
skyfall["Temp3pm"].fillna(value=skyfall["Temp3pm"].mean(), inplace=True)
skyfall["Pressure9am"].fillna(value=skyfall["Pressure9am"].mean(), inplace=True)
skyfall["Humidity3pm"].fillna(value=skyfall["Humidity3pm"].mean(), inplace=True)
skyfall["Humidity9am"].fillna(value=skyfall["Humidity9am"].mean(), inplace=True)
skyfall["Pressure3pm"].fillna(value=skyfall["Pressure3pm"].mean(), inplace=True)
Test["Evaporation"].fillna(value=skyfall["Evaporation"].mean(), inplace=True)
Test["MinTemp"].fillna(value=skyfall["MinTemp"].mean(), inplace=True)
Test["MaxTemp"].fillna(value=skyfall["MaxTemp"].mean(), inplace=True)
Test["Rainfall"].fillna(value=skyfall["Rainfall"].mean(), inplace=True)
Test["Sunshine"].fillna(value=skyfall["Sunshine"].mean(), inplace=True)
Test["WindGustSpeed"].fillna(value=skyfall["WindGustSpeed"].mean(), inplace=True)
Test["WindSpeed3pm"].fillna(value=skyfall["WindSpeed3pm"].mean(), inplace=True)
Test["WindSpeed9am"].fillna(value=skyfall["WindSpeed9am"].mean(), inplace=True)
Test["Cloud9am"].fillna(value=skyfall["Cloud9am"].mean(), inplace=True)
Test["Cloud3pm"].fillna(value=skyfall["Cloud3pm"].mean(), inplace=True)
Test["Temp9am"].fillna(value=skyfall["Temp9am"].mean(), inplace=True)
Test["Temp3pm"].fillna(value=skyfall["Temp3pm"].mean(), inplace=True)
Test["Pressure9am"].fillna(value=skyfall["Pressure9am"].mean(), inplace=True)
Test["Humidity3pm"].fillna(value=skyfall["Humidity3pm"].mean(), inplace=True)
Test["Humidity9am"].fillna(value=skyfall["Humidity9am"].mean(), inplace=True)
Test["Pressure3pm"].fillna(value=skyfall["Pressure3pm"].mean(), inplace=True)
skyfall = pd.concat(
[
skyfall[numerical],
skyfall[["RainToday"]],
pd.get_dummies(skyfall.Location),
pd.get_dummies(skyfall.WindGustDir),
pd.get_dummies(skyfall.WindDir9am),
pd.get_dummies(skyfall.WindDir3pm),
],
axis=1,
)
Test = pd.concat(
[
Test[numerical],
Test[["RainToday"]],
pd.get_dummies(Test.Location),
pd.get_dummies(Test.WindGustDir),
pd.get_dummies(Test.WindDir9am),
pd.get_dummies(Test.WindDir3pm),
],
axis=1,
)
Test
# Convert the RainToday column to numeric
a = np.ones(len(skyfall["RainToday"]))
b = np.zeros(len(skyfall["RainToday"]))
skyfall["RainToday"] = np.where(skyfall["RainToday"] == "Yes", a, b)
skyfall["RainToday"] = pd.to_numeric(skyfall["RainToday"])
a = np.ones(len(Test["RainToday"]))
b = np.zeros(len(Test["RainToday"]))
Test["RainToday"] = np.where(Test["RainToday"] == "Yes", a, b)
Test["RainToday"] = pd.to_numeric(Test["RainToday"])
skyfall.tail(20)
print(round(skyfall[numerical].describe(), 2))
print(round(Test[numerical].describe(), 2))
IQR = skyfall.Rainfall.quantile(0.75) - skyfall.Rainfall.quantile(0.25)
Lower_fence = skyfall.Rainfall.quantile(0.25) - (IQR * 3)
Upper_fence = skyfall.Rainfall.quantile(0.75) + (IQR * 3)
print(
"skyfall Rainfall outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
IQR = Test.Rainfall.quantile(0.75) - Test.Rainfall.quantile(0.25)
Lower_fence = Test.Rainfall.quantile(0.25) - (IQR * 3)
Upper_fence = Test.Rainfall.quantile(0.75) + (IQR * 3)
print(
"Test Rainfall outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
IQR = skyfall.Evaporation.quantile(0.75) - skyfall.Evaporation.quantile(0.25)
Lower_fence = skyfall.Evaporation.quantile(0.25) - (IQR * 3)
Upper_fence = skyfall.Evaporation.quantile(0.75) + (IQR * 3)
print(
"skyfall Evaporation outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
IQR = Test.Evaporation.quantile(0.75) - Test.Evaporation.quantile(0.25)
Lower_fence = Test.Evaporation.quantile(0.25) - (IQR * 3)
Upper_fence = Test.Evaporation.quantile(0.75) + (IQR * 3)
print(
"Test Evaporation outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
IQR = skyfall.WindSpeed9am.quantile(0.75) - skyfall.WindSpeed9am.quantile(0.25)
Lower_fence = skyfall.WindSpeed9am.quantile(0.25) - (IQR * 3)
Upper_fence = skyfall.WindSpeed9am.quantile(0.75) + (IQR * 3)
print(
"skyfall WindSpeed9am outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
IQR = Test.WindSpeed9am.quantile(0.75) - Test.WindSpeed9am.quantile(0.25)
Lower_fence = Test.WindSpeed9am.quantile(0.25) - (IQR * 3)
Upper_fence = Test.WindSpeed9am.quantile(0.75) + (IQR * 3)
print(
"Test WindSpeed9am outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
IQR = skyfall.WindSpeed3pm.quantile(0.75) - skyfall.WindSpeed3pm.quantile(0.25)
Lower_fence = skyfall.WindSpeed3pm.quantile(0.25) - (IQR * 3)
Upper_fence = skyfall.WindSpeed3pm.quantile(0.75) + (IQR * 3)
print(
"skyfall WindSpeed3pm outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
IQR = Test.WindSpeed3pm.quantile(0.75) - Test.WindSpeed3pm.quantile(0.25)
Lower_fence = Test.WindSpeed3pm.quantile(0.25) - (IQR * 3)
Upper_fence = Test.WindSpeed3pm.quantile(0.75) + (IQR * 3)
print(
"Test WindSpeed3pm outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
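# The four blocks above repeat the same IQR computation; a small helper
# (a sketch equivalent to the prints above) makes the fences reusable:
def iqr_fences(df, col, k=3):
    q1, q3 = df[col].quantile(0.25), df[col].quantile(0.75)
    iqr = q3 - q1
    return q1 - k * iqr, q3 + k * iqr

# Example (commented out):
# for col in ["Rainfall", "Evaporation", "WindSpeed9am", "WindSpeed3pm"]:
#     print(col, iqr_fences(skyfall, col), iqr_fences(Test, col))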
def max_value(df3, variable, top):
return np.where(df3[variable] > top, top, df3[variable])
def min_value(df3, variable, low):
return np.where(df3[variable] < low, low, df3[variable])
for df3 in [skyfall, Test]:
df3["Rainfall"] = max_value(df3, "Rainfall", 4.0)
df3["Evaporation"] = max_value(df3, "Evaporation", 9.280406676678158)
df3["Evaporation"] = min_value(df3, "Evaporation", 0.38969499249138106)
df3["WindSpeed9am"] = max_value(df3, "WindSpeed9am", 55)
df3["WindSpeed3pm"] = max_value(df3, "WindSpeed3pm", 57)
# Normalization (min-max scaling)
cols = skyfall.columns
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
skyfall = scaler.fit_transform(skyfall)
Test = scaler.transform(Test)
skyfall = pd.DataFrame(skyfall, columns=[cols])
Test = pd.DataFrame(Test, columns=[cols])
skyfall
# Correlation analysis below
corr = skyfall.corr(method="pearson")
corr
towns = []  # list of all towns
for i in range(49):
towns.append(list(skyfall)[i + 21])
names = list(skyfall)
names
# Find the towns closest to the sea (near_sea) and the ones farthest from the sea (inland)
print(corr.loc[towns[0], "Rainfall"].values[0][0])
print(corr.loc[towns[0], "WindSpeed3pm"].values[0][0])
print(corr.loc[towns[0], "Evaporation"].values[0][0])
print(corr.loc[towns[0], "Cloud3pm"].values[0][0])
print(corr.loc[towns[0], "Humidity3pm"].values[0][0])
print(corr.loc[towns[0], "Pressure3pm"].values[0][0])
near_sea = []
for i in towns:
# print(i,corr.loc[i,'Rainfall'].values[0][0])
if (
(corr.loc[i, "Cloud3pm"].values[0][0]) > 0.01
and (corr.loc[i, "Humidity3pm"].values[0][0]) > 0.01
and (corr.loc[i, "Pressure3pm"].values[0][0]) > 0.01
):
near_sea.append(i)
near_sea
# (corr.loc[i,'Cloud3pm'].values[0][0]) < -0.01 and
# and (corr.loc[i,'Pressure3pm'].values[0][0]) < -0.01
inland = []
for i in towns:
# print(i,corr.loc[i,'Rainfall'].values[0][0])
if (corr.loc[i, "Humidity3pm"].values[0][0]) < -0.01 and (
corr.loc[i, "Pressure3pm"].values[0][0]
) < -0.01:
inland.append(i)
inland
# Add them to skyfall and Test with the corresponding sea-proximity values (1 and -1 respectively, 0 otherwise)
skyfall["NearSea"] = pd.Series(np.zeros(101822))
for i in near_sea:
a = np.where(np.array(skyfall[i]) != 0)[0]
for j in range(len(a)):
skyfall.iat[a[j], 118] = 1.0
for i in inland:
a = np.where(np.array(skyfall[i]) != 0)[0]
for j in range(len(a)):
skyfall.iat[a[j], 118] = -1.0
# Same for Test below
Test["NearSea"] = pd.Series(np.zeros(43638))
for i in near_sea:
a = np.where(np.array(Test[i]) != 0)[0]
for j in range(len(a)):
Test.iat[a[j], 118] = 1.0
for i in inland:
a = np.where(np.array(Test[i]) != 0)[0]
for j in range(len(a)):
Test.iat[a[j], 118] = -1.0
skyfall
# All these operations added one more correct prediction, improving the result by about 0.00015
# Now let's try a dryness feature; parameters: sunshine, temperature at 3pm, and the range between max and min temperature
a = np.array(skyfall["MinTemp"]).reshape(1, len(np.array(skyfall["MinTemp"])))
b = np.array(skyfall["MaxTemp"]).reshape(1, len(np.array(skyfall["MaxTemp"])))
# print(a,b)
c = a - b
skyfall["DeltaTemp"] = pd.Series(c[0])
skyfall
corr = skyfall.corr(method="pearson")
corr
dry_city = []
for i in towns:
if (
(corr.loc[i, "Sunshine"].values[0][0]) > 0.01
and (corr.loc[i, "DeltaTemp"].values[0][0]) > 0.01
and (corr.loc[i, "Temp3pm"].values[0][0]) > 0.01
):
dry_city.append(i)
dry_city
not_dry = []
for i in towns:
# print(i,corr.loc[i,'Rainfall'].values[0][0])
if (
(corr.loc[i, "Sunshine"].values[0][0]) < -0.01
and (corr.loc[i, "DeltaTemp"].values[0][0]) < -0.01
and (corr.loc[i, "Temp3pm"].values[0][0]) < -0.01
):
not_dry.append(i)
not_dry
skyfall["Tropics"] = pd.Series(np.zeros(101822))
for i in dry_city:
a = np.where(np.array(skyfall[i]) != 0)[0]
for j in range(len(a)):
skyfall.iat[a[j], 120] = 1.0
for i in not_dry:
a = np.where(np.array(skyfall[i]) != 0)[0]
for j in range(len(a)):
skyfall.iat[a[j], 120] = -1.0
a = np.array(Test["MinTemp"]).reshape(1, len(np.array(Test["MinTemp"])))
b = np.array(Test["MaxTemp"]).reshape(1, len(np.array(Test["MaxTemp"])))
c = a - b
Test["DeltaTemp"] = pd.Series(c[0])
Test["Tropics"] = pd.Series(np.zeros(43638))
for i in dry_city:
a = np.where(np.array(Test[i]) != 0)[0]
for j in range(len(a)):
Test.iat[a[j], 120] = 1.0
for i in not_dry:
a = np.where(np.array(Test[i]) != 0)[0]
for j in range(len(a)):
Test.iat[a[j], 120] = -1.0
Test
del skyfall["MinTemp"]
del Test["MinTemp"]
skyfall
# Dropped MinTemp because Min, Max and delta together are too many features and they correlate with each other
# Now let's take the differences of all parameters that are measured at two times of day
a = np.array(skyfall["WindSpeed3pm"]).reshape(1, len(np.array(skyfall["WindSpeed3pm"])))
b = np.array(skyfall["WindSpeed9am"]).reshape(1, len(np.array(skyfall["WindSpeed9am"])))
c = a - b
skyfall["SpeedFrac"] = pd.Series(c[0])
a = np.array(skyfall["Humidity3pm"]).reshape(1, len(np.array(skyfall["Humidity3pm"])))
b = np.array(skyfall["Humidity9am"]).reshape(1, len(np.array(skyfall["Humidity9am"])))
c = a - b
skyfall["HumidityFrac"] = pd.Series(c[0])
a = np.array(skyfall["Pressure3pm"]).reshape(1, len(np.array(skyfall["Pressure3pm"])))
b = np.array(skyfall["Pressure9am"]).reshape(1, len(np.array(skyfall["Pressure9am"])))
c = a - b
skyfall["PressureFrac"] = pd.Series(c[0])
a = np.array(skyfall["Cloud3pm"]).reshape(1, len(np.array(skyfall["Cloud3pm"])))
b = np.array(skyfall["Cloud9am"]).reshape(1, len(np.array(skyfall["Cloud9am"])))
c = a - b
skyfall["CloudFrac"] = pd.Series(c[0])
a = np.array(skyfall["Temp3pm"]).reshape(1, len(np.array(skyfall["Temp3pm"])))
b = np.array(skyfall["Temp9am"]).reshape(1, len(np.array(skyfall["Temp9am"])))
c = a - b
skyfall["TempFrac"] = pd.Series(c[0])
a = np.array(Test["WindSpeed3pm"]).reshape(1, len(np.array(Test["WindSpeed3pm"])))
b = np.array(Test["WindSpeed9am"]).reshape(1, len(np.array(Test["WindSpeed9am"])))
c = a - b
Test["SpeedFrac"] = pd.Series(c[0])
a = np.array(Test["Humidity3pm"]).reshape(1, len(np.array(Test["Humidity3pm"])))
b = np.array(Test["Humidity9am"]).reshape(1, len(np.array(Test["Humidity9am"])))
c = a - b
Test["HumidityFrac"] = pd.Series(c[0])
a = np.array(Test["Pressure3pm"]).reshape(1, len(np.array(Test["Pressure3pm"])))
b = np.array(Test["Pressure9am"]).reshape(1, len(np.array(Test["Pressure9am"])))
c = a - b
Test["PressureFrac"] = pd.Series(c[0])
a = np.array(Test["Cloud3pm"]).reshape(1, len(np.array(Test["Cloud3pm"])))
b = np.array(Test["Cloud9am"]).reshape(1, len(np.array(Test["Cloud9am"])))
c = a - b
Test["CloudFrac"] = pd.Series(c[0])
a = np.array(Test["Temp3pm"]).reshape(1, len(np.array(Test["Temp3pm"])))
b = np.array(Test["Temp9am"]).reshape(1, len(np.array(Test["Temp9am"])))
c = a - b
Test["TempFrac"] = pd.Series(c[0])
del (
skyfall["WindSpeed9am"],
skyfall["Humidity9am"],
skyfall["Pressure9am"],
skyfall["Cloud9am"],
skyfall["Temp9am"],
)
del (
Test["WindSpeed9am"],
Test["Humidity9am"],
Test["Pressure9am"],
Test["Cloud9am"],
Test["Temp9am"],
)
del skyfall["RainToday"], Test["RainToday"]
# Some hand-tuning of the columns (power transforms)
a = np.array(skyfall["Rainfall"].T)[0] ** (2)  # transforming this one gives poor results anyway
skyfall["Rainfall"] = a
b = np.array(Test["Rainfall"].T)[0] ** (2)
Test["Rainfall"] = b
a = np.array(skyfall["Sunshine"].T)[0] ** (1 / 4) # в квадрат -- неплохо!
skyfall["Sunshine"] = a
b = np.array(Test["Sunshine"].T)[0] ** (1 / 4)
Test["Sunshine"] = b
a = (np.array(skyfall["Pressure3pm"].T)[0]) ** (1 / 2) # не стоит изменять вообще
skyfall["Pressure3pm"] = a
b = (np.array(Test["Pressure3pm"].T)[0]) ** (1 / 2)
Test["Pressure3pm"] = b
a = (np.array(skyfall["Temp3pm"].T)[0]) ** (2)
skyfall["Temp3pm"] = a
b = (np.array(Test["Temp3pm"].T)[0]) ** (2)
Test["Temp3pm"] = b
a = (np.array(skyfall["Evaporation"].T)[0]) ** (1 / 8) # это хорошо
skyfall["Evaporation"] = a
b = (np.array(Test["Evaporation"].T)[0]) ** (1 / 8)
Test["Evaporation"] = b
a = np.array(skyfall["Cloud3pm"].T)[0] ** (1 / 8) # это хорошо
skyfall["Cloud3pm"] = a
b = np.array(Test["Cloud3pm"].T)[0] ** (1 / 8)
Test["Cloud3pm"] = b
cols = skyfall.columns
np.array(cols).shape
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
skyfall = scaler.fit_transform(skyfall)
Test = scaler.transform(Test)
skyfall = pd.DataFrame(skyfall, columns=np.array(cols))
Test = pd.DataFrame(Test, columns=np.array(cols))
# train a logistic regression model on the training set
from sklearn.linear_model import LogisticRegression
# instantiate the model
logreg = LogisticRegression(solver="lbfgs", random_state=0)
# fit the model
logreg.fit(skyfall, y)
y_pred_test = logreg.predict(Test)
len(np.where(y_pred_test != 0)[0])
y_pred_train = logreg.predict(skyfall)
from sklearn.metrics import confusion_matrix
print("Train metrics")
print(confusion_matrix(y, y_pred_train))
answer = pd.DataFrame({"ID": np.arange(43638), "RainTomorrow": y_pred_test})
answer
answer.to_csv("AUSout21.csv", index=False)
# first decent result - 6613, second - 6614, third - 6615; 6626 is already bad!
from sklearn.metrics import confusion_matrix
print("Train metrics")
print(confusion_matrix(y, y_pred_train))  # y_train is not defined here; y holds the full training labels
|
import cv2
import os
from torchvision import models
import torchvision.transforms as T
from PIL import Image
import torch
import numpy as np
import mediapipe as mp
Link_List = [
"https://www.youtube.com/watch?v=NglE9QX38Jo",
"https://www.youtube.com/watch?v=qa9_p5Y2Zmo",
"https://www.youtube.com/watch?v=rtXRnC34LHs",
"https://www.youtube.com/watch?v=HXUQrPRF5Jw",
"https://www.youtube.com/watch?v=NLjuKZXg-2A",
"https://www.youtube.com/watch?v=zqGexL99EIA",
"https://www.youtube.com/watch?v=g2WZPy0dABY",
"https://www.youtube.com/watch?v=lGMjtuQ4f-s",
"https://www.youtube.com/watch?v=vrWajnsJSgE",
"https://www.youtube.com/watch?v=9ELp9w-4ssw",
]
from pytube import YouTube
for count, link in enumerate(Link_List):
# Create YouTube object
yt = YouTube(link)
Audio = yt.streams.get_by_itag(139)
Audio.download("", str(count) + "Asl.mp4")
# Select highest resolution stream
stream = yt.streams.get_highest_resolution()
# Download video to current working directory
stream.download("", str(count) + "vid.mp4")
# GPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# # Ready Segmentation
model = models.segmentation.deeplabv3_resnet101(pretrained=1).eval()
model.to(device)
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
static_image_mode=False,
max_num_hands=2,
min_detection_confidence=0.2,
min_tracking_confidence=0.2,
)
def crop(image, source, nc=21):
label_colors = np.array(
[
(0, 0, 0), # 0=background
# 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
(0, 0, 0),
(0, 0, 0),
(0, 0, 0),
(0, 0, 0),
(0, 0, 0),
# 6=bus, 7=car, 8=cat, 9=chair, 10=cow
(0, 0, 0),
(0, 0, 0),
(0, 0, 0),
(0, 0, 0),
(0, 0, 0),
# 11=dining table, 12=dog, 13=horse, 14=motorbike, 15=person
(0, 0, 0),
(0, 0, 0),
(0, 0, 0),
(0, 0, 0),
(192, 128, 128),
# 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
(0, 0, 0),
(0, 0, 0),
(0, 0, 0),
(0, 0, 0),
(0, 0, 0),
]
)
r = np.zeros_like(image).astype(np.uint8)
g = np.zeros_like(image).astype(np.uint8)
b = np.zeros_like(image).astype(np.uint8)
for l in range(0, nc):
idx = image == l
r[idx] = label_colors[l, 0]
g[idx] = label_colors[l, 1]
b[idx] = label_colors[l, 2]
rgb = np.stack([r, g, b], axis=2)
foreground = source
# foreground=cv2.cvtColor(foreground,cv2.COLOR_BGR2RGB)
foreground = cv2.resize(foreground, (r.shape[1], r.shape[0]))
background = 255 * np.ones_like(rgb).astype(np.uint8)
foreground = foreground.astype(float)
background = background.astype(float)
th, alpha = cv2.threshold(np.array(rgb), 0, 255, cv2.THRESH_BINARY)
alpha = cv2.GaussianBlur(alpha, (7, 7), 0)
alpha = alpha.astype(float) / 255
foreground = cv2.multiply(alpha, foreground)
background = cv2.multiply(1.0 - alpha, background)
outImage = cv2.add(foreground, background)
return outImage
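# Run DeepLabV3 on a frame, keep only the "person" class mask, blend the frame
# onto a white background via crop(), and resize back to the original width/height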
def modify(img, width, height):
transforms = T.Compose(
[
T.ToTensor(),
T.Resize(640),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
inp = transforms(img).unsqueeze(0)
inp = inp.to(device)
out = model(inp)["out"]
om = torch.argmax(out.squeeze(), dim=0).detach().cpu().numpy()
result = crop(
om, img
) # use result = crop(om,source) while using crop function discussed later
result = cv2.resize(result, (width, height))
result = np.uint8(result)
return result
def Create_Dataset(vid_path, count=0):
cap = cv2.VideoCapture(vid_path)
# # Get the video frame rate and total number of frames
frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# # Set the number of frames to skip per minute
frames_per_minute = frame_rate * 60
# # Create a directory to save the frames
output_folder_bg = "Frames_bg"
output_folder_ng = "Frames"
if not os.path.exists(output_folder_bg):
os.makedirs(output_folder_bg)
if not os.path.exists(output_folder_ng):
os.makedirs(output_folder_ng)
# # Get the video dimensions
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# # Setup Video Writer
vid_write_bg = cv2.VideoWriter(
output_folder_bg + str(count) + "_video.avi",
cv2.VideoWriter_fourcc(*"MJPG"),
frame_rate,
(int(cap.get(3)), int(cap.get(4))),
)
vid_write_ng = cv2.VideoWriter(
output_folder_ng + str(count) + "_video.avi",
cv2.VideoWriter_fourcc(*"MJPG"),
frame_rate,
(int(cap.get(3)), int(cap.get(4))),
)
# # Define the coordinates for the right bottom corner box
box_width = int(720 / 8)
box_height = int(1280 / 8)
box_x = width - box_width
box_y = height - box_height
# # Iterate through the video and extract frames for every minute
for i in range(0, total_frames):
# print(i)
if i % 100 == 0:
print(count, i / frames_per_minute)
# # Set the frame index to the beginning of the minute
cap.set(cv2.CAP_PROP_POS_FRAMES, i)
# # Read the frame
ret, frame = cap.read()
if ret:
# # Add the box to the frame
pos = "left" # Should add the position variable in vid names going forward XXXXXXXXXXXX
if pos == "left":
box = frame[int(height * 0.25) : height, 0 : int(width * 0.30)]
box = cv2.resize(box, (width, height))
elif pos == "right":
box = frame[int(height * 0.25) : height, int(width * 0.70) : width]
box = cv2.resize(box, (width, height))
else:
box = frame
result = modify(box, width, height)
results = hands.process(box)
# Draw the detected hand landmarks on the frame
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
# print('Hands')
mp_drawing.draw_landmarks(
box, hand_landmarks, mp_hands.HAND_CONNECTIONS
)
mp_drawing.draw_landmarks(
result, hand_landmarks, mp_hands.HAND_CONNECTIONS
)
vid_write_bg.write(box)
vid_write_ng.write(result)
else:
break
# # Release the video capture object and close all windows
cap.release()
# cv2.destroyAllWindows()
vid_write_bg.release()
vid_write_ng.release()
# # Load the video
video_path = "/kaggle/working/برنامج ٨ الصبح بلغة الإشارة - شيكو وهشام في حوار مع رحمة خالد ترجمة أميرة محي dmc_بلغة_الإشارة.mp4"
for count, link in enumerate(Link_List):
video_path = "/kaggle/working/" + str(count) + "vid.mp4"
Create_Dataset(video_path, count)
|
# # Introduction
# In this notebook we aimed to make a prediction model for legendary Pokémon. All the code and libraries we used were taught in DataCamp's [Machine Learning with scikit-learn](https://app.datacamp.com/learn/courses/machine-learning-with-scikit-learn).
# # Pokemon dataset
# We chose a Pokémon dataset which describes 721 Pokémon (plus 79 Mega Evolutions) and their basic statistics.
# First, let's import some packages that we will need and read the file :
import numpy as np # Linear algebra
import pandas as pd # Data processing
import sklearn.model_selection  # Train/test split
import sklearn.linear_model  # Needed for sklearn.linear_model.LinearRegression below
from sklearn.neighbors import KNeighborsClassifier # KNN
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score,
) # Accuracy for KNN
from sklearn.metrics import roc_curve # ROC curve
import matplotlib.pyplot as plt # Plots
import seaborn as sns # EDA
df = pd.read_csv("../input/pokemon/Pokemon.csv") # Read dataset
print(df.head(10)) # See detail
print(df.shape) # How many rows/columns
df.columns
# More details about the columns :
# __#__: ID for each pokemon
# __Name__: Name of each pokemon
# __Type 1__: Each pokemon has a type, this determines weakness/resistance to attacks
# __Type 2__: Some pokemon are dual type and have 2
# __Total__: sum of all stats that come after this, a general guide to how strong a pokemon is
# __HP__: hit points, or health, defines how much damage a pokemon can withstand before fainting
# __Attack__: the base modifier for normal attacks (eg. Scratch, Punch)
# __Defense__: the base damage resistance against normal attacks
# __SP Atk__: special attack, the base modifier for special attacks (e.g. fire blast, bubble beam)
# __SP Def__: the base damage resistance against special attacks
# __Speed__: determines which pokemon attacks first each round
df["Generation"].value_counts().plot.bar()
# The dataset covers the first 6 generations. A generation is a grouping of the Pokémon games that separates them based on the Pokémon they include.
print(df["Legendary"].value_counts()) # How many legendaries
# Legendary Pokémon are a group of Pokémon, generally featured in the legends and myths of the Pokémon world.
df["Type 1"].unique()
# As we can see there are many types of Pokémon. Which type is the most common?
df["Type 1"].value_counts().plot.bar()
df1 = df.drop(["#", "Generation"], axis=1)
df1.describe()
# Let's make it visually easier to understand :
fig, ax = plt.subplots()
ax.boxplot(
[df["HP"], df["Attack"], df["Defense"], df["Sp. Atk"], df["Sp. Def"], df["Speed"]]
)
ax.set_xticklabels(
["HP", "Attack", "Defense", "Special Attack", "Special Defense", "Speed"],
rotation=45,
)
plt.show()
# The HP distribution looks flatter than the others, while Attack, Special Attack & Speed look similar to each other. Defense & Special Defense are also similar.
fig, ax = plt.subplots()
ax.boxplot([df["Total"]])
ax.set_xticklabels(["Total"], rotation=45)
plt.show()
# These characteristics contribute to the total score of each Pokémon. We notice that the data is widely spread.
# As the median is 450, a Pokémon with a total score above 450 will be stronger than 50% of the Pokémon it may encounter. With 60 more points, it will be stronger than 75% of Pokémon.
# # Legendary pokemon
# Let's see if we can find some specific characteristics of Legendary Pokémon and make some comparisons with the other Pokémon.
# First, how many are there?
df["Legendary"].value_counts()
# They are not numerous at all. So we can already note a first specificity: legendary Pokémon are rare!
#
# They are always unique to a region, and it is impossible to find multiples of each in one game. Hence they are supposed to be high-tier additions to a player's team.
#
# We will now check their characteristics.
legendaryCounts = df[df.Legendary == True]
legendaryCounts["Type 1"].value_counts().plot.bar()
# Legendary Pokémon often have a special type, such as Psychic (Mew, Mewtwo) or an elemental type (Zapdos, Articuno, Moltres). This hints that they might be more inclined to use Special Attack rather than regular Attack.
#
# In the chart above we can see which types most Legendary Pokémon are. It suggests that most of them are indeed Special Attack users. It does not mean that they cannot use physical moves, but if they did, their damage potential would not be used to the max.
sns.set_theme(style="ticks", palette="pastel")
sns.boxplot(x="Legendary", y="HP", palette=["b", "r"], data=df)
sns.despine(offset=10, trim=True)
# Both distributions look fairly flat. Most of the outliers are non-legendary Pokémon, but legendary Pokémon generally (in 75% of cases) have more HP than the others.
sns.set_theme(style="ticks", palette="pastel")
sns.boxplot(x="Legendary", y="Attack", palette=["b", "r"], data=df)
sns.despine(offset=10, trim=True)
# 75% of legendary Pokémon have a higher Attack than 75% of normal Pokémon.
sns.set_theme(style="ticks", palette="pastel")
sns.boxplot(x="Legendary", y="Defense", palette=["b", "r"], data=df)
sns.despine(offset=10, trim=True)
# Same observation as for HP.
sns.set_theme(style="ticks", palette="pastel")
sns.boxplot(x="Legendary", y="Sp. Atk", palette=["b", "r"], data=df)
sns.despine(offset=10, trim=True)
# The distribution is wide compared to the other boxplots. Legendary Pokémon seem to have a stronger Special Attack than the other Pokémon.
sns.set_theme(style="ticks", palette="pastel")
sns.boxplot(x="Legendary", y="Sp. Def", palette=["b", "r"], data=df)
sns.despine(offset=10, trim=True)
# The Special Defense also looks higher for legendary Pokémon than for the other Pokémon.
sns.set_theme(style="ticks", palette="pastel")
sns.boxplot(x="Legendary", y="Speed", palette=["b", "r"], data=df)
sns.despine(offset=10, trim=True)
# The distributions are really different: the legendary Pokémon distribution is quite flat compared to the other Pokémon distribution, which is wide. But the legendary Pokémon are still faster in most cases.
sns.set_theme(style="ticks", palette="pastel")
sns.boxplot(x="Legendary", y="Total", palette=["b", "r"], data=df)
sns.despine(offset=10, trim=True)
# The other Pokémon distribution is much wider than the legendary one. We notice that the minimum and the 1st quartile are the same in the legendary distribution!
sns.set_theme(style="white")
sns.relplot(
x="Type 1",
y="Total",
hue="Legendary",
alpha=0.5,
palette="muted",
height=7.5,
data=df,
)
plt.xticks(rotation=45)
# To sum up what we found by exploring the data :
# Legendary Pokémon are rare, and most of the time they have better HP, Speed, Attack, Defense and Special Defense, as well as the strongest Special Attack. This logically leads to the highest totals, which means that in general legendary Pokémon are more powerful than the normal ones.
# With the last graph, we also notice that the type doesn't seem to influence how powerful legendary Pokémon are. There appears to be a minimum Total of about 590 to be a legendary Pokémon (a quick check follows below).
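# A quick check of that threshold (a small sketch on the same DataFrame):
print(
    "Minimum Total among legendary Pokemon:",
    df[df.Legendary == True]["Total"].min(),
)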
# # EDA using Seaborn
# We used Seaborn from Chapter 1 - Classification
# Special attack is the one that has the most correlation with Total (0.75), hence we will use these for LinearRegression, KNN and overall plots.
# Create EDA with non-string values
data = pd.DataFrame(
df, columns=["Total", "HP", "Attack", "Defense", "Sp. Atk", "Sp. Def", "Speed"]
)
corrMatrix = data.corr()
f, ax = plt.subplots(figsize=(8, 6))
ax = sns.heatmap(corrMatrix, annot=True)
plt.show()
# This result implies that Special Attack Pokémon usually have higher stats and are more effective in battle.
#
# As mentioned above, the difference between Attack and Special attack is the type of the Pokémon. Attack is related to physical damage (i.e. Tackle or Headbutt) while Special attack relates to non-connecting attacks (i.e. Psyshock, Fire Blast).
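# To double-check the 0.75 figure quoted above, the per-feature correlations
# with Total can be listed directly (a small sketch using the same `data` frame):
print(data.corr()["Total"].drop("Total").sort_values(ascending=False))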
# # Linear Regression
# We used sklearn.LinearRegression from Chapter 2 - Regression
# For LR we used Sp. Atk as the feature and Total as the target. It ended up with an R² score of about 0.55, which is not that great. It might not have been the best fit.
# Prepare for Linear regression on Sp. Atk and Total
regX = data.loc[:, "Sp. Atk":]
regy = pd.DataFrame(data.loc[:, "Total"])
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
pd.DataFrame(regX.loc[:, "Sp. Atk"]), regy, random_state=0
) # Train/Test split
LinearReg = sklearn.linear_model.LinearRegression().fit(
X_train, y_train
) # Linear Regression
# Create plot
x = np.array(regX["Sp. Atk"])
plt.figure(figsize=(10, 8))
plt.ylabel("Total")
plt.xlabel("Sp. Atk")
plt.title("LinearRegression score: {:.3f}".format(LinearReg.score(X_test, y_test)))
# Prepare grid
ax = plt.gca()
ax.xaxis.grid(True, alpha=0.4)
ax.yaxis.grid(True, alpha=0.4)
for spine in plt.gca().spines.values():
spine.set_visible(False)
# Scatter plot
plt.scatter(regX.loc[:, "Sp. Atk"], regy, marker="o", s=50, alpha=0.8)
# Linear regression line
plt.plot(
regX.loc[:, "Sp. Atk"],
LinearReg.coef_ * x.reshape(-1, 1) + LinearReg.intercept_,
"r-",
)
plt.show()
# # KNN
# We used sklearn's KNeighborsClassifier from Chapter 2 - Regression
# Since Linear Regression was not the best way to go, we tried using the KNN method to predict legendary Pokémon. We dropped string-type columns and used only numerical data.
# We ended up with 94 % accuracy.
# Create new values for KNN with no strings on X
X = df.drop(["Legendary", "Name", "Type 1", "Type 2"], axis=1)
y = df["Legendary"]
# Train/test split
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
X, y, random_state=43, test_size=0.4, stratify=y
)
# KNN with 4 neighbours
knn = KNeighborsClassifier(n_neighbors=4)
knn.fit(X_train, y_train)
predictions = knn.predict(X_test)
# TP / (TP + FP)
precision = precision_score(y_test, predictions, pos_label=None, average="weighted")
# TP / (TP + FN)
recall = recall_score(y_test, predictions, pos_label=None, average="weighted")
# F1 score
f1 = f1_score(y_test, predictions, pos_label=None, average="weighted")
# TP + (TN / No. of predictions)
accuracy = accuracy_score(y_test, predictions)
print(
"accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f"
% (accuracy, precision, recall, f1)
)
# # ROC Curve
# We used sklearn.roc_curve from Chapter 3 - Fine-tuning your model
# Since KNN had great accuracy, we checked the ROC curve for its results, and it looks great. KNN was the way to go.
# Re-do KNN
y_pred = knn.predict_proba(X_test)[:, 1]
# Create ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
# Create plot
plt.plot([0, 1], [0, 1], "k--")
plt.plot(fpr, tpr, label="Knn")
# Labels
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.title("ROC curve - Knn 4-N")
plt.show()
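# The ROC curve can also be summarised by its area under the curve
# (a small addition; roc_auc_score is part of sklearn.metrics):
from sklearn.metrics import roc_auc_score
print("ROC AUC:", roc_auc_score(y_test, y_pred))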
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # House Prices
# With explanatory variables describing (almost) every aspect of residential homes in Ames, Iowa, the goal is to use this dataset to predict the final price of each home.
# Code for showing toggle button for showing/hiding the code
# While exporting this Jupyter notebook to html for presentation purpose, hide the code for analytical view
from IPython.display import HTML, Image, display
HTML(
"""
<script>
code_show=true;
function code_toggle(){
if (code_show){
$('div.input').hide();
$("#btn_toggle").val("Show Code");
}else{
$('div.input').show();
$("#btn_toggle").val("Hide Code");
}
code_show= !code_show;
}
</script>
<style>
.output_png {
display: table-cell;
text-align: center;
vertical-align: middle;
}
</style>
<form action="javascript:code_toggle()">
<input style = "float:right" type="submit" id="btn_toggle" value="Hide Code">
"""
)
# import necessary libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import mlxtend
from mlxtend.preprocessing import standardize
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
# Visualization settings
sns.set_style(style="white")
sns.set(
rc={
"figure.figsize": (12, 7),
"axes.facecolor": "white",
"axes.grid": True,
"grid.color": ".9",
"axes.linewidth": 1.0,
"grid.linestyle": "-",
},
font_scale=1.5,
)
custom_colors = ["#3498db", "#95a5a6", "#34495e", "#2ecc71", "#e74c3c"]
sns.set_palette(custom_colors)
Image("../input/houseprice-1/housesbanner_1.png")
df_train = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
print("Training data loaded")
# ## Dataset description
print("Sample dataframe:\n")
df_train.head(3)
print(
f"Initial data frame contains {df_train.shape[0]} rows and {df_train.shape[1]} columns."
)
col_tags = (
"<ol>" + "".join([f"<li><h4>{col}</h4></li>" for col in df_train.columns]) + "</ol>"
)
display(HTML("<h3>Feature names:</h3>"))
display(HTML(col_tags))
print("Dataset summary :\n")
df_train.info()
#
# 3 float columns
# 35 integer columns
# 43 String columns
#
val = df_train.isnull().sum()
df = pd.DataFrame(val, columns=["NullCount"]).query("NullCount != 0")
df = pd.DataFrame({"Features": df.index, "NullCount": df.NullCount.values})
df.sort_values(by="NullCount", inplace=True, ascending=False)
sns.barplot(data=df, x="Features", y="NullCount")
plt.xticks(rotation=90)
plt.title("Features having null values")
df
print(f"Number of features having null values: {df.shape[0]}")
## <center> Features - Exploratory Data Analysis</center>
# Function for showing the statistics and pie chart for discrete features.
def str_discrete_analysis(fld_name):
print("Value counts: \n")
print(fld_name.value_counts())
plt.pie(
fld_name.value_counts(),
labels=fld_name.value_counts().index,
autopct=lambda x: f"{x: .2f}%",
)
plt.title(fld_name.name + " -distribution")
print("Null value count : ", fld_name.isnull().sum())
def conti_fld_analysis(fld_name):
print(fld_name.describe())
plt.subplot(2, 2, 1)
plt.hist(fld_name)
plt.subplot(2, 2, 2)
sns.boxplot(fld_name)
plt.suptitle(fld_name.name + " -distribution")
print("Null value count : ", fld_name.isnull().sum())
# ### SalePrice - Target Feature
df_train.SalePrice.describe()
plt.figure(figsize=(20, 10))
plt.subplot(2, 2, 1)
plt.hist(x=df_train.SalePrice)
plt.subplot(2, 2, 2)
sns.distplot(df_train.SalePrice)
plt.subplot(2, 2, 3)
sns.boxplot(df_train.SalePrice)
plt.suptitle("SalePrice - distribution")
print(f"Skewness in data : {df_train.SalePrice.skew()}")
#
# Continuous variable
# Positively skewed distribution (a log-transform sketch follows below).
# Presence of outliers.
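# Since SalePrice is positively skewed, a log transform is a common remedy.
# A quick sketch (shown for illustration only; df_train itself is left unchanged):
log_price = np.log1p(df_train.SalePrice)
print(f"Skewness after log1p transform: {log_price.skew():.3f}")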
# ### 1. MSSubClass
# Identifies the type of dwelling involved in the sale.
# 20 1-STORY 1946 & NEWER ALL STYLES
# 30 1-STORY 1945 & OLDER
# 40 1-STORY W/FINISHED ATTIC ALL AGES
# 45 1-1/2 STORY - UNFINISHED ALL AGES
# 50 1-1/2 STORY FINISHED ALL AGES
# 60 2-STORY 1946 & NEWER
# 70 2-STORY 1945 & OLDER
# 75 2-1/2 STORY ALL AGES
# 80 SPLIT OR MULTI-LEVEL
# 85 SPLIT FOYER
# 90 DUPLEX - ALL STYLES AND AGES
# 120 1-STORY PUD (Planned Unit Development) - 1946 & NEWER
# 150 1-1/2 STORY PUD - ALL AGES
# 160 2-STORY PUD - 1946 & NEWER
# 180 PUD - MULTILEVEL - INCL SPLIT LEV/FOYER
# 190 2 FAMILY CONVERSION - ALL STYLES AND AGES
df_train.MSSubClass.value_counts().plot.bar()
plt.title("MSSubClass-value counts")
#
# Discrete variable.
# ### 2. MSZoning
# Identifies the general zoning classification of the sale.
# A Agriculture
# C Commercial
# FV Floating Village Residential
# I Industrial
# RH Residential High Density
# RL Residential Low Density
# RP Residential Low Density Park
# RM Residential Medium Density
df_train.MSZoning.value_counts().plot.bar()
plt.title("MSZoning value counts")
#
# Categorical variable.
# ### 3. LotFrontage
# Linear feet of street connected to property
plt.hist(x=df_train.LotFrontage, bins=100)
plt.title("LotFrontage-distribution")
print("Descriptive statistics:")
df_train.LotFrontage.describe()
#
# Continuous variable.
# Missing values present.
# ### 4. LotArea
# Lot size in square feet
df_train.LotArea.describe()
plt.figure(figsize=(20, 10))
plt.subplot(2, 2, 1)
plt.hist(df_train.LotArea)
plt.subplot(2, 2, 2)
sns.boxplot(df_train.LotArea)
plt.suptitle("LotArea - distribution")
#
# Continuous variable
# ### 5. Street
# Type of road access to property
# Grvl Gravel
# Pave Paved
print(f"Unique values: \n")
np.unique(df_train.Street, return_counts=True)
plt.pie(
df_train.Street.value_counts(),
autopct=lambda s: f"{s:.2f}%",
labels=["Paved", "Gravel"],
)
plt.title("Street- distribution")
#
# String categorical variable.
# Almost 99% of the values are 'Paved'.
# ### 6. Alley
# Type of alley access to property
# Grvl Gravel
# Pave Paved
# NA No alley access
#
print("Value counts:\n")
df_train.Alley.value_counts()
#
# Categorical string values.
# Presence of Null values.
# Null/Missing value indicates 'No Alley Access'.
# Hence most of the observations have 'No Alley Access' (see the quick check below).
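# A quick check of how the missing Alley values could be made explicit
# (the label "NoAlley" is an arbitrary placeholder; df_train is not modified here):
print(df_train.Alley.fillna("NoAlley").value_counts())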
# ### 7. LotShape
# General shape of property
# Reg Regular
# IR1 Slightly irregular
# IR2 Moderately Irregular
# IR3 Irregular
print("Value counts:\n")
df_train.LotShape.value_counts()
plt.pie(
df_train.LotShape.value_counts(),
autopct=lambda s: f"{s:.2f}%",
labels=df_train.LotShape.value_counts().index,
)
plt.title("LotShape - distribution")
#
# String categorical variable.
# ### 8. LandContour
# Flatness of the property
# Lvl Near Flat/Level
# Bnk Banked - Quick and significant rise from street grade to building
# HLS Hillside - Significant slope from side to side
# Low Depression
print("Value counts:")
df_train.LandContour.value_counts()
plt.pie(
df_train.LandContour.value_counts(),
autopct=lambda s: f"{s:.2f}%",
labels=df_train.LandContour.value_counts().index,
)
plt.title("LandContour - distribution")
#
# Categorical String variable
# ### 9. Utilities
# Type of utilities available
# AllPub All public Utilities (E,G,W,& S)
# NoSewr Electricity, Gas, and Water (Septic Tank)
# NoSeWa Electricity and Gas Only
# ELO Electricity only
print("Value Counts:\n")
df_train.Utilities.value_counts()
#
# Categorical String variable.
# ### 10. LotConfig
# Lot configuration
# Inside Inside lot
# Corner Corner lot
# CulDSac Cul-de-sac
# FR2 Frontage on 2 sides of property
# FR3 Frontage on 3 sides of property
#
str_discrete_analysis(df_train.LotConfig)
#
# String Categorical variable.
# ### 11. LandSlope
# Slope of property
#
# Gtl Gentle slope
# Mod Moderate Slope
# Sev Severe Slope
str_discrete_analysis(df_train.LandSlope)
#
# String Categorical variable.
# ### 12. Neighborhood
# Physical locations within Ames city limits
# Blmngtn Bloomington Heights
# Blueste Bluestem
# BrDale Briardale
# BrkSide Brookside
# ClearCr Clear Creek
# CollgCr College Creek
# Crawfor Crawford
# Edwards Edwards
# Gilbert Gilbert
# IDOTRR Iowa DOT and Rail Road
# MeadowV Meadow Village
# Mitchel Mitchell
# Names North Ames
# NoRidge Northridge
# NPkVill Northpark Villa
# NridgHt Northridge Heights
# NWAmes Northwest Ames
# OldTown Old Town
# SWISU South & West of Iowa State University
# Sawyer Sawyer
# SawyerW Sawyer West
# Somerst Somerset
# StoneBr Stone Brook
# Timber Timberland
# Veenker Veenker
df_train.Neighborhood.value_counts().plot.bar()
plt.title("Neighborhood - distribution")
#
# Categorical String variable.
# ### 13. Condition1
# Proximity to various conditions
#
# Artery Adjacent to arterial street
# Feedr Adjacent to feeder street
# Norm Normal
# RRNn Within 200' of North-South Railroad
# RRAn Adjacent to North-South Railroad
# PosN Near positive off-site feature--park, greenbelt, etc.
# PosA Adjacent to postive off-site feature
# RRNe Within 200' of East-West Railroad
# RRAe Adjacent to East-West Railroad
str_discrete_analysis(df_train.Condition1)
display(HTML("<h3 style='background-color:yellow'>String Discrete variable </h3>"))
# ### 14. Condition2
# Proximity to various conditions (if more than one is present)
#
# Artery Adjacent to arterial street
# Feedr Adjacent to feeder street
# Norm Normal
# RRNn Within 200' of North-South Railroad
# RRAn Adjacent to North-South Railroad
# PosN Near positive off-site feature--park, greenbelt, etc.
# PosA Adjacent to postive off-site feature
# RRNe Within 200' of East-West Railroad
# RRAe Adjacent to East-West Railroad
str_discrete_analysis(df_train.Condition2)
# String Discrete variable
# ### 15. BldgType: Type of dwelling
#
# 1Fam Single-family Detached
# 2FmCon Two-family Conversion; originally built as one-family dwelling
# Duplx Duplex
# TwnhsE Townhouse End Unit
# TwnhsI Townhouse Inside Unit
str_discrete_analysis(df_train.BldgType)
# String Discrete variable
# ### 16. OverallQual: Rates the overall material and finish of the house
# 10 Very Excellent
# 9 Excellent
# 8 Very Good
# 7 Good
# 6 Above Average
# 5 Average
# 4 Below Average
# 3 Fair
# 2 Poor
# 1 Very Poor
str_discrete_analysis(df_train.OverallQual)
# Integer ordinal variable
# ### 17. OverallCond: Rates the overall condition of the house
# 10 Very Excellent
# 9 Excellent
# 8 Very Good
# 7 Good
# 6 Above Average
# 5 Average
# 4 Below Average
# 3 Fair
# 2 Poor
# 1 Very Poor
str_discrete_analysis(df_train.OverallCond)
# Integer ordinal variable.
# ### 18. YearBuilt: Original construction date
df_train.YearBuilt.describe()
plt.hist(df_train.YearBuilt)
plt.title("Construction Year - distribution")
#
# Categorical variable.
# ### 19. YearRemodAdd: Remodel date (same as construction date if no remodeling or additions)
df_train.YearRemodAdd.describe()
plt.hist(df_train.YearRemodAdd)
plt.title("Remodel date -distribution")
#
# Discrete variable.
# ### 20. RoofStyle: Type of roof
# Flat Flat
# Gable Gable
# Gambrel Gabrel (Barn)
# Hip Hip
# Mansard Mansard
# Shed Shed
str_discrete_analysis(df_train.RoofStyle)
#
# Categorical variable.
# ### 21. RoofMatl: Roof material
# ClyTile Clay or Tile
# CompShg Standard (Composite) Shingle
# Membran Membrane
# Metal Metal
# Roll Roll
# Tar&Grv Gravel & Tar
# WdShake Wood Shakes
# WdShngl Wood Shingles
str_discrete_analysis(df_train.RoofMatl)
#
# Discrete variable.
# ### 23. Exterior1st: Exterior covering on house
# AsbShng Asbestos Shingles
# AsphShn Asphalt Shingles
# BrkComm Brick Common
# BrkFace Brick Face
# CBlock Cinder Block
# CemntBd Cement Board
# HdBoard Hard Board
# ImStucc Imitation Stucco
# MetalSd Metal Siding
# Other Other
# Plywood Plywood
# PreCast PreCast
# Stone Stone
# Stucco Stucco
# VinylSd Vinyl Siding
# Wd Sdng Wood Siding
# WdShing Wood Shingles
str_discrete_analysis(df_train.Exterior1st)
#
# Discrete variable.
# ### 23. Exterior2nd: Exterior covering on house (if more than one material)
# AsbShng Asbestos Shingles
# AsphShn Asphalt Shingles
# BrkComm Brick Common
# BrkFace Brick Face
# CBlock Cinder Block
# CemntBd Cement Board
# HdBoard Hard Board
# ImStucc Imitation Stucco
# MetalSd Metal Siding
# Other Other
# Plywood Plywood
# PreCast PreCast
# Stone Stone
# Stucco Stucco
# VinylSd Vinyl Siding
# Wd Sdng Wood Siding
# WdShing Wood Shingles
str_discrete_analysis(df_train.Exterior2nd)
#
# Discrete variable.
# ### 23. MasVnrType: Masonry veneer type
# BrkCmn Brick Common
# BrkFace Brick Face
# CBlock Cinder Block
# None None
# Stone Stone
#
str_discrete_analysis(df_train.MasVnrType)
#
# Discrete variable.
# ### 24. MasVnrArea: Masonry veneer area in square feet
conti_fld_analysis(df_train.MasVnrArea)
#
# Continuous variable.
# ### 25. ExterQual: Evaluates the quality of the material on the exterior
#
# Ex Excellent
# Gd Good
# TA Average/Typical
# Fa Fair
# Po Poor
str_discrete_analysis(df_train.ExterQual)
#
# Ordinal variable.
# ### 26. ExterCond: Evaluates the present condition of the material on the exterior
#
# Ex Excellent
# Gd Good
# TA Average/Typical
# Fa Fair
# Po Poor
str_discrete_analysis(df_train.ExterCond)
#
# Ordinal variable.
# ### 27. Foundation: Type of foundation
#
# BrkTil Brick & Tile
# CBlock Cinder Block
# PConc Poured Contrete
# Slab Slab
# Stone Stone
# Wood Wood
str_discrete_analysis(df_train.Foundation)
#
# String categorical variable.
# ### 28. BsmtQual: Evaluates the height of the basement
# Ex Excellent (100+ inches)
# Gd Good (90-99 inches)
# TA Typical (80-89 inches)
# Fa Fair (70-79 inches)
# Po Poor (<70 inches
# NA No Basement
str_discrete_analysis(df_train.BsmtQual)
#
# Ordinal variable.
# ### 29. BsmtCond: Evaluates the general condition of the basement
# Ex Excellent
# Gd Good
# TA Typical - slight dampness allowed
# Fa Fair - dampness or some cracking or settling
# Po Poor - Severe cracking, settling, or wetness
# NA No Basement
str_discrete_analysis(df_train.BsmtCond)
#
# Ordinal variable.
# ### 30. BsmtExposure: Refers to walkout or garden level walls
# Gd Good Exposure
# Av Average Exposure (split levels or foyers typically score average or above)
# Mn Mimimum Exposure
# No No Exposure
# NA No Basement
#
str_discrete_analysis(df_train.BsmtExposure)
#
# Ordinal variable.
# ### 31. BsmtFinType1: Rating of basement finished area
# GLQ Good Living Quarters
# ALQ Average Living Quarters
# BLQ Below Average Living Quarters
# Rec Average Rec Room
# LwQ Low Quality
# Unf Unfinshed
# NA No Basement
str_discrete_analysis(df_train.BsmtFinType1)
#
# Ordinal variable.
# ### 32. BsmtFinSF1: Type 1 finished square feet
#
conti_fld_analysis(df_train.BsmtFinSF1)
#
# Continuous variable.
# ### 33. BsmtFinType2: Rating of basement finished area (if multiple types)
# GLQ Good Living Quarters
# ALQ Average Living Quarters
# BLQ Below Average Living Quarters
# Rec Average Rec Room
# LwQ Low Quality
# Unf Unfinshed
# NA No Basement
str_discrete_analysis(df_train.BsmtFinType2)
#
# Ordinal variable.
# ### 34. BsmtFinSF2: Type 2 finished square feet
conti_fld_analysis(df_train.BsmtFinSF2)
#
# Continuous variable.
# ### 35. BsmtUnfSF: Unfinished square feet of basement area
conti_fld_analysis(df_train.BsmtUnfSF)
#
# Continuous variable.
# ### 36. TotalBsmtSF: Total square feet of basement area
conti_fld_analysis(df_train.TotalBsmtSF)
#
# Continuous variable.
# ### 37. Heating: Type of heating
#
# Floor Floor Furnace
# GasA Gas forced warm air furnace
# GasW Gas hot water or steam heat
# Grav Gravity furnace
# OthW Hot water or steam heat other than gas
# Wall Wall furnace
df_train.Heating.value_counts()
plt.pie(
df_train.Heating.value_counts(),
autopct=lambda s: f"{s:.2f}%",
labels=df_train.Heating.value_counts().index,
)
plt.title("Heating - distribution")
#
# String Categorical variable
# ### 38. HeatingQC: Heating quality and condition
# Ex Excellent
# Gd Good
# TA Average/Typical
# Fa Fair
# Po Poor
df_train.HeatingQC.value_counts()
plt.pie(
df_train.HeatingQC.value_counts(),
autopct=lambda s: f"{s:.2f}%",
labels=df_train.HeatingQC.value_counts().index,
)
plt.title("HeatingQC - distribution")
#
# Ordinal variable.
# ### 39. CentralAir: Central air conditioning
# N No
# Y Yes
str_discrete_analysis(df_train.CentralAir)
#
# Categorical String variable.
# 93% of the houses are air-conditioned.
# ### 40. Electrical: Electrical system
# SBrkr Standard Circuit Breakers & Romex
# FuseA Fuse Box over 60 AMP and all Romex wiring (Average)
# FuseF 60 AMP Fuse Box and mostly Romex wiring (Fair)
# FuseP 60 AMP Fuse Box and mostly knob & tube wiring (poor)
# Mix Mixed
str_discrete_analysis(df_train.Electrical)
# String Discrete variable
# ### 41. 1stFlrSF: First Floor square feet
conti_fld_analysis(df_train["1stFlrSF"])
# ### 42. 2ndFlrSF: Second floor square feet
conti_fld_analysis(df_train["2ndFlrSF"])
# Continuous variable.
# ### 43. LowQualFinSF: Low quality finished square feet (all floors)
#
df_train.LowQualFinSF.describe()
plt.subplot(2, 2, 1)
plt.hist(df_train.LowQualFinSF)
plt.subplot(2, 2, 2)
plt.boxplot(df_train.LowQualFinSF)
plt.suptitle("Low Quality Fisnished Square feet - distribution")
#
# Continuous variable
# ### 44. GrLivArea: Above grade (ground) living area square feet
df_train.GrLivArea.describe()
plt.hist(df_train.GrLivArea)
plt.title("GrLivArea - distribution")
plt.ylabel("Count")
# ### 45. BsmtFullBath: Basement full bathrooms
str_discrete_analysis(df_train.BsmtFullBath)
#
# Ordinal variable
# ### 46. BsmtHalfBath: Basement half bathrooms
str_discrete_analysis(df_train.BsmtHalfBath)
# Ordinal variable.
# ### 47. FullBath: Full bathrooms above grade
str_discrete_analysis(df_train.FullBath)
# Ordinal variable
# ### 48. HalfBath: Half baths above grade
str_discrete_analysis(df_train.HalfBath)
# Ordinal variable
# ### 49. BedroomAbvGr: Bedrooms above grade (does NOT include basement bedrooms)
str_discrete_analysis(df_train.BedroomAbvGr)
# Ordinal variable
# ### 50. KitchenAbvGr: Kitchens above grade
str_discrete_analysis(df_train.KitchenAbvGr)
#
# Ordinal feature.
# Most of the houses have 1 kitchen above grade.
# ### 51. KitchenQual: Kitchen quality
# Ex Excellent
# Gd Good
# TA Typical/Average
# Fa Fair
# Po Poor
df_train.KitchenQual.value_counts()
plt.pie(
df_train.KitchenQual.value_counts(),
labels=df_train.KitchenQual.value_counts().index,
autopct=lambda x: f"{x:.2f}%",
)
plt.title("Kitchen Quality -distribution")
#
# Ordinal variable.
# 50% of the houses have an average kitchen.
# Only 7% have excellent kitchens.
# ### 52. TotRmsAbvGrd: Total rooms above grade (does not include bathrooms)
str_discrete_analysis(df_train.TotRmsAbvGrd)
# Ordinal variable.
# ### 53. Functional: Home functionality (Assume typical unless deductions are warranted)
# Typ Typical Functionality
# Min1 Minor Deductions 1
# Min2 Minor Deductions 2
# Mod Moderate Deductions
# Maj1 Major Deductions 1
# Maj2 Major Deductions 2
# Sev Severely Damaged
# Sal Salvage only
str_discrete_analysis(df_train.Functional)
# Discrete variable.
# ### 54. Fireplaces: Number of fireplaces
df_train.Fireplaces.describe()
plt.pie(
df_train.Fireplaces.value_counts(),
autopct=lambda s: f"{s:.2f}%",
labels=df_train.Fireplaces.value_counts().index,
)
plt.title("Number of Fireplaces -distribution")
#
# Ordinal variable.
# ### 55. FireplaceQu: Fireplace quality
# Ex Excellent - Exceptional Masonry Fireplace
# Gd Good - Masonry Fireplace in main level
# TA Average - Prefabricated Fireplace in main living area or Masonry Fireplace in basement
# Fa Fair - Prefabricated Fireplace in basement
# Po Poor - Ben Franklin Stove
# NA No Fireplace
str_discrete_analysis(df_train.FireplaceQu)
#
# Ordinal variable.
# 50% of the houses have good fireplace quality.
# ### 56. GarageType: Garage location
#
# 2Types More than one type of garage
# Attchd Attached to home
# Basment Basement Garage
# BuiltIn Built-In (Garage part of house - typically has room above garage)
# CarPort Car Port
# Detchd Detached from home
# NA No Garage
str_discrete_analysis(df_train.GarageType)
#
# Discrete variable
# 63% of the houses have an attached garage.
# ### 57. GarageYrBlt: Year garage was built
df_train.GarageYrBlt.describe()
plt.hist(df_train.GarageYrBlt)
plt.title("GarageYrBlt - distribution")
#
# Discrete variable.
# ### 58. GarageFinish: Interior finish of the garage
# Fin Finished
# RFn Rough Finished
# Unf Unfinished
# NA No Garage
str_discrete_analysis(df_train.GarageFinish)
#
# Ordinal variable
# Null values present.
# ### 59. GarageCars: Size of garage in car capacity
str_discrete_analysis(df_train.GarageCars)
#
# Ordinal variable.
# 56% of the garages have a 2-car capacity.
# ### 60. GarageArea: Size of garage in square feet
conti_fld_analysis(df_train.GarageArea)
#
# Continuous variable.
# ### 61. GarageQual: Garage quality
# Ex Excellent
# Gd Good
# TA Typical/Average
# Fa Fair
# Po Poor
# NA No Garage
str_discrete_analysis(df_train.GarageQual)
# Most garages are of average quality.
# Ordinal variable.
# ### 62. GarageCond: Garage condition
# Ex Excellent
# Gd Good
# TA Typical/Average
# Fa Fair
# Po Poor
# NA No Garage
str_discrete_analysis(df_train.GarageCond)
#
# Ordinal variable.
# Most garages are in average condition.
# ### 63. PavedDrive: Paved driveway
# Y Paved
# P Partial Pavement
# N Dirt/Gravel
str_discrete_analysis(df_train.PavedDrive)
#
# Ordinal variable
# Most of the houses have a paved driveway.
# ### 64. WoodDeckSF: Wood deck area in square feet
conti_fld_analysis(df_train.WoodDeckSF)
#
# Continuous variable.
# ### 65. OpenPorchSF: Open porch area in square feet
conti_fld_analysis(df_train.OpenPorchSF)
#
# Continuous variable.
# ### 66. EnclosedPorch: Enclosed porch area in square feet
df_train.EnclosedPorch.describe()
plt.subplot(2, 2, 1)
plt.hist(df_train.EnclosedPorch)
plt.subplot(2, 2, 2)
sns.boxplot(df_train.EnclosedPorch)
plt.suptitle("EnclosedPorch - distribution")
#
# Continuous variable.
# ### 67. 3SsnPorch: Three season porch area in square feet
conti_fld_analysis(df_train["3SsnPorch"])
#
# Continuous variable.
# ### 68. ScreenPorch: Screen porch area in square feet
conti_fld_analysis(df_train.ScreenPorch)
# Continuous variable.
# ### 69. PoolArea: Pool area in square feet
df_train.PoolArea.describe()
plt.subplot(2, 2, 1)
plt.hist(df_train.PoolArea, bins=20)
plt.subplot(2, 2, 2)
plt.boxplot(df_train.PoolArea)
plt.suptitle("PoolArea -distribution")
#
# Continuous variable.
# ### 70. PoolQC: Pool quality
#
# Ex Excellent
# Gd Good
# TA Average/Typical
# Fa Fair
# NA No Pool
str_discrete_analysis(df_train.PoolQC)
#
# Ordinal variable
# ### 71. Fence: Fence quality
#
# GdPrv Good Privacy
# MnPrv Minimum Privacy
# GdWo Good Wood
# MnWw Minimum Wood/Wire
# NA No Fence
df_train.Fence.value_counts()
#
# Ordinal variable.
# Presence of missing values.
# ### 72. MiscFeature: Miscellaneous feature not covered in other categories
#
# Elev Elevator
# Gar2 2nd Garage (if not described in garage section)
# Othr Other
# Shed Shed (over 100 SF)
# TenC Tennis Court
# NA None
str_discrete_analysis(df_train.MiscFeature)
# Discrete variable.
# ### 73. MiscVal: $Value of miscellaneous feature
conti_fld_analysis(df_train.MiscVal)
# Continuous variable.
# ### 74. MoSold: Month Sold (MM)
#
print(f"Unique values: {df_train.MoSold.unique().tolist()}")
#
# Discrete variable.
# ### 75. YrSold: Year Sold (YYYY)
df_train.YrSold.describe()
plt.hist(df_train.YrSold)
plt.xticks(df_train.YrSold.unique().tolist())
plt.title("Year Sold - distribution")
#
# Discrete variable.
# Around 300 sales happened in each of the years 2006 to 2009.
# Fewer sales happened in 2010.
# ### 76. SaleType: Type of sale
#
# WD Warranty Deed - Conventional
# CWD Warranty Deed - Cash
# VWD Warranty Deed - VA Loan
# New Home just constructed and sold
# COD Court Officer Deed/Estate
# Con Contract 15% Down payment regular terms
# ConLw Contract Low Down payment and low interest
# ConLI Contract Low Interest
# ConLD Contract Low Down
# Oth Other
str_discrete_analysis(df_train.SaleType)
# Discrete variable.
# Most of the sales are Warranty Deed - Conventional.
# ### 77. SaleCondition: Condition of sale
# Normal Normal Sale
# Abnorml Abnormal Sale - trade, foreclosure, short sale
# AdjLand Adjoining Land Purchase
# Alloca Allocation - two linked properties with separate deeds, typically condo with a garage unit
# Family Sale between family members
# Partial Home was not completed when last assessed (associated with New Homes)
df_train.SaleCondition.value_counts()
plt.pie(
df_train.SaleCondition.value_counts(),
autopct=lambda s: f"{s:.2f}%",
labels=df_train.SaleCondition.value_counts().index,
)
plt.title("Sale condition - distribution")
#
# Categorical String variable.
# Most sales are 'Normal'.
# ### 78. HouseStyle
# 1Story One story
# 1.5Fin One and one-half story: 2nd level finished
# 1.5Unf One and one-half story: 2nd level unfinished
# 2Story Two story
# 2.5Fin Two and one-half story: 2nd level finished
# 2.5Unf Two and one-half story: 2nd level unfinished
# SFoyer Split Foyer
# SLvl Split Level
str_discrete_analysis(df_train.HouseStyle)
#
# String discrete variable
# ## Feature Relationships
# ### 1. Sale price based on the Neighborhood
grp_neighbrhd = df_train.groupby("Neighborhood")
unique_neighbrhd = df_train.Neighborhood.unique().tolist()
neighbrhd_list, SalePrice_list = [], []
for item in unique_neighbrhd:
df = grp_neighbrhd.get_group(item)
neighbrhd_list.append(item)
SalePrice_list.append(df.SalePrice.mean())
df_neighbrhd = pd.DataFrame(
{"Neighborhood": neighbrhd_list, "SalePrice": SalePrice_list}
)
df_neighbrhd.sort_values("SalePrice", ascending=False, inplace=True)
sns.barplot(data=df_neighbrhd, x="Neighborhood", y="SalePrice")
plt.xticks(rotation=90)
plt.ylabel("Avg SalePrice")
plt.title("Average SalePrice based on Neighborhood")
del [grp_neighbrhd, df_neighbrhd, df, unique_neighbrhd]
#
# Houses in Northridge have the highest average SalePrice.
# Meadow Village has the lowest average SalePrice.
# ### 2. OverAll quality of the house and SalePrice
grp_overallqual = df_train.groupby("OverallQual")
unique_overallqual = df_train.OverallQual.unique()
sale_price_list, overall_qual = [], []
for item in unique_overallqual:
df = grp_overallqual.get_group(item)
overall_qual.append(item)
sale_price_list.append(df.SalePrice.mean())
df_overallqual = pd.DataFrame(
{"OverallQual": overall_qual, "SalePrice": sale_price_list}
)
sns.lineplot(data=df_overallqual, x="OverallQual", y="SalePrice")
plt.title("SalePrice based on OverAllQuality")
plt.ylabel("Average SalePrice")
plt.xlabel("OverAll Quality")
#
# It's clear from the dataset that as the quality of the house increases, SalePrice also increases.
#
del (df, df_overallqual)
# ### 3. Age of the house and SalePrice
df = df_train.loc[:, ["YearBuilt", "YrSold", "SalePrice"]].copy()
df["Age"] = df.YrSold - df.YearBuilt
print("House age statistics:\n")
df.Age.describe()
grp_age = df.groupby("Age")
unique_age = df.Age.unique()
age_list, sale_price_list, count_list = [], [], []
for age in unique_age:
df_sale = grp_age.get_group(age)
age_list.append(age)
sale_price_list.append(df_sale.SalePrice.mean())
count_list.append(df_sale.Age.count())
df_sale = pd.DataFrame(
{"Age": age_list, "SalePrice": sale_price_list, "Count": count_list}
)
sns.lineplot(data=df_sale, x="Age", y="SalePrice")
plt.title("Age of the house vs. SalePrice")
plt.ylabel("Average SalePrice")
sns.lineplot(data=df_sale, x="Age", y="Count")
plt.xticks(rotation=90)
plt.title("Count of reords")
#
# The maximum house age in the dataset is 136 years.
# There are around 100 newly constructed houses (age 0).
# Average SalePrice decreases as the age of the house increases.
# However, there is a sudden increase in price for houses whose age is 114.
# ## Data Imputation
#
df_input = df_train.copy()
def process_imputation(df_input):
df_input["PoolQC"].fillna("NA", inplace=True)
print("PoolQC - replaced missing values with NA")
df_input.MiscFeature.fillna("NA", inplace=True)
print("MiscFeature - replaced missing values with NA")
df_input.Alley.fillna("NA", inplace=True)
print("Alley - replaced missing values with NA")
df_input.Fence.fillna("NA", inplace=True)
print("Fence - replaced missing values with NA")
df_input.FireplaceQu.fillna("NA", inplace=True)
print("FireplaceQuality - replaced missing values with NA")
df_input.LotFrontage.fillna(0, inplace=True)
print("Lot frontage - replaced missing values with zero")
df_input.GarageType.fillna("NA", inplace=True)
print("Garage type - replaced missing values with NA")
print("GarageYrBlt - Replacing missing value with House built year")
df_input.GarageYrBlt.fillna(df_input.YearBuilt, inplace=True)
print("GarageFinish - Replacing missing values with NA")
df_input.GarageFinish.fillna("NA", inplace=True)
print("GarageQual - Replacing missing values with NA")
df_input.GarageQual.fillna("NA", inplace=True)
print("GarageCond - Replacing missing values with NA")
df_input.GarageCond.fillna("NA", inplace=True)
for col in ["BsmtExposure", "BsmtFinType2", "BsmtFinType1", "BsmtCond", "BsmtQual"]:
df_input[col].fillna("NA", inplace=True)
print(f"{col} - replaced missing values with NA")
df_input.MasVnrArea.fillna(0, inplace=True)
print("MasVnrArea - replaced missing values with 0")
df_input.MasVnrType.fillna("None", inplace=True)
print("MasVnrType - replaced missing values with None")
df_input.Electrical.fillna("NA", inplace=True)
print("Electrical - replaced missing values with NA")
print("Is there any missing values? ")
print(df_input.isnull().any().value_counts().index)
return df_input
df_input = process_imputation(df_input)
#
# Missing values removed from dataset.
#
#
# ## Data Cleaning and Manipulation
print(f"Target variable SalePrice skewness: {df_input.SalePrice.skew()}")
sns.boxplot(data=df_input, x="SalePrice")
plt.title("SalePrice -original distribution")
total = df_input.shape[0]
threshold_price = 330000
cnt = df_input.query("SalePrice > @threshold_price").shape[0]
print(
    "Target variable has outliers. Number of records with SalePrice above",
    threshold_price,
    ":",
    cnt,
    "which is",
    (cnt / total) * 100,
    "%",
)
# print ("Removing records with SalePrice > ", threshold_price)
# df_input.drop(df_input[df_input["SalePrice"]>threshold_price].index,inplace=True)
# plt.subplot(2,2,1)
# sns.boxplot(data=df_input,x="SalePrice")
# plt.suptitle("SalePrice -Ater removing outliers distribution");
# plt.subplot(2,2,2)
# plt.hist(data=df_input,x="SalePrice");
print("Skewness", df_input.SalePrice.skew())
print("Applying SquareRoot method of Saleprice for reducing the skewness.")
df_input["SalePrice_sqrt"] = np.sqrt(df_input.SalePrice)
plt.hist(df_input.SalePrice_sqrt)
plt.title("Square root of SalePrice - distribution")
print("Skewness: ", df_input.SalePrice_sqrt.skew())
def wrangle_data(df):
print(f"Shape of the dataframe before wrangling: {df.shape}")
# Create new feature Age from year built and year sold
df["Age"] = df["YrSold"] - df["YearBuilt"]
print("Created new feature 'Age' using Year sold and Year built")
df.drop(["YearBuilt", "YrSold"], axis=1, inplace=True)
print("Removed features - YearBuilt,YrSold")
# Below features contains meaningless value or presence of one value dominant.
# Hence This features doesn't make any sense.So removing from dataset.
del_vars = [
"Street",
"Alley",
"LandContour",
"Utilities",
"LandSlope",
"Condition2",
"RoofMatl",
"Heating",
"CentralAir",
"Electrical",
"Functional",
"MiscFeature",
"MoSold",
"Id",
]
df.drop(del_vars, inplace=True, axis=1)
ordinal_vars = [
"BsmtQual",
"BsmtFullBath",
"BsmtHalfBath",
"FullBath",
"HalfBath",
"BedroomAbvGr",
"KitchenAbvGr",
"TotRmsAbvGrd",
"Fireplaces",
"GarageCars",
"Age",
]
lbl_encoder = LabelEncoder()
for var in ordinal_vars:
df[var] = lbl_encoder.fit_transform(df[var])
for var in ["BsmtFinType1", "BsmtFinType2"]:
df[var].replace(
["GLQ", "ALQ", "BLQ", "Rec", "LwQ", "Unf", "NA"],
[0, 1, 2, 3, 4, 5, 6],
inplace=True,
)
for var in [
"ExterQual",
"ExterCond",
"BsmtCond",
"HeatingQC",
"KitchenQual",
"FireplaceQu",
"GarageQual",
"GarageCond",
"PoolQC",
]:
df[var].replace(
["Ex", "Gd", "TA", "Fa", "Po", "NA"], [0, 1, 2, 3, 4, 5], inplace=True
)
for var in ["BsmtExposure"]:
df[var].replace(["Gd", "Av", "Mn", "No", "NA"], [0, 1, 2, 3, 4], inplace=True)
for var in ["GarageFinish"]:
df[var].replace(["Fin", "RFn", "Unf", "NA"], [0, 1, 2, 3], inplace=True)
for var in ["PavedDrive"]:
df[var].replace(["Y", "P", "N"], [0, 1, 2], inplace=True)
for var in ["Fence"]:
df[var].replace(
["GdPrv", "MnPrv", "GdWo", "MnWw", "NA"], [0, 1, 2, 3, 4], inplace=True
)
df = pd.get_dummies(df)
print(f"Shape of the dataframe after wrangling {df.shape}")
return df
df_input = wrangle_data(df_input)
df_input.drop("SalePrice", axis=1, inplace=True)
print("Removed the feature SalePrice")
# Checking for any String fields.
print("List of any string data?")
df_input.dtypes[df_input.dtypes == "object"]
corr_matrix = df_input.corr()
corr_matrix = pd.DataFrame(corr_matrix["SalePrice_sqrt"]).sort_values(
"SalePrice_sqrt", ascending=False
)
negative_corr_flds = corr_matrix[corr_matrix["SalePrice_sqrt"] < 0].index.tolist()
# Remove negative correlated features
df_input.drop(negative_corr_flds, axis=1, inplace=True)
# Removing below derived features from dataset, since they are not present in the final test file.
df_input.drop(
[
"HouseStyle_2.5Fin",
"Exterior1st_ImStucc",
"Exterior1st_Stone",
"Exterior2nd_Other",
],
axis=1,
inplace=True,
)
# Create independent and dependent features for model training.
x = df_input.drop("SalePrice_sqrt", axis=1)
y = df_input.SalePrice_sqrt
training_features = x.columns.tolist()
# Training and Testing split.
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1)
print(
f"Shape of x train: {x_train.shape}, y train: {y_train.shape},x test: {x_test.shape}, y test: {y_test.shape}"
)
# ## Model Evaluation
# Function for providing generalized results for regression model
def evaluate_model(model, x_train, y_train, x_test, y_test):
model.fit(x_train, y_train)
print(
f"Training score: {model.score(x_train,y_train)} \nTesting score: {model.score(x_test,y_test)}"
)
y_pred = model.predict(x_test)
print("Prediction completed")
df = pd.DataFrame({"Actual": y_test, "Predicted": y_pred})
# Applying square function to transform to original target variable.
df = df.apply(np.square)
# Finding the difference between original and predicted
df["difference"] = df.Predicted - df.Actual
df.reset_index(inplace=True)
# Plot actual vs predicted
plt.figure(figsize=(20, 10))
    sns.scatterplot(data=df, x="index", y="Actual", color="lightgrey", label="Actual")
    sns.lineplot(data=df, x="index", y="Predicted", color="red", label="Predicted")
plt.legend(loc="right", bbox_to_anchor=(1.1, 1))
plt.title("Actual vs Predicted")
plt.show()
return model
# ### Model: LinearRegression
lr_model = evaluate_model(
LinearRegression(normalize=True), x_train, y_train, x_test, y_test
)
# ### Model: Lasso Regression
lasso_reg = evaluate_model(
Lasso(alpha=0.01, normalize=True), x_train, y_train, x_test, y_test
)
# ### Model: Ridge Regression
ridge_reg = evaluate_model(Ridge(normalize=True), x_train, y_train, x_test, y_test)
# ### Model: ElasticNet Regression
el = evaluate_model(ElasticNet(normalize=False), x_train, y_train, x_test, y_test)
# ### Model: XtremeGradientBoosting
xgb_reg = evaluate_model(XGBRegressor(), x_train, y_train, x_test, y_test)
del (df, df_input, df_sale, df_train)
# ## Test file output prediction
df_test = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
print("Test file for prediction loaded.")
df_input = df_test.copy()
print("Shape of test data: ", df_input.shape)
df_input = process_imputation(df_input)
val = df_input.isnull().sum()
df = pd.DataFrame(val, columns=["NullCount"]).query("NullCount != 0")
df = pd.DataFrame({"Features": df.index, "NullCount": df.NullCount.values})
df.sort_values(by="NullCount", inplace=True, ascending=False)
print("Fields with missing values:\n", df)
for col in [
"MSZoning",
"Utilities",
"Functional",
"Exterior1st",
"Exterior2nd",
"KitchenQual",
"SaleType",
]:
df_input[col].fillna("NA", inplace=True)
print(col, "Replaced missing values with NA")
for col in [
"BsmtFullBath",
"BsmtHalfBath",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"GarageCars",
"GarageArea",
]:
df_input[col].fillna(0, inplace=True)
print(col, "Replaced missing values with 0")
val = df_input.isnull().sum()
df = pd.DataFrame(val, columns=["NullCount"]).query("NullCount != 0")
df = pd.DataFrame({"Features": df.index, "NullCount": df.NullCount.values})
df.sort_values(by="NullCount", inplace=True, ascending=False)
print("Fields with missing values:\n", df)
df_result = pd.DataFrame()
df_result["Id"] = df_input.Id
df_input = wrangle_data(df_input)
print("Any string features: ")
# Checking for any String fields.
df_input.dtypes[df_input.dtypes == "object"]
# Remove negative correlated features
df_input.drop(negative_corr_flds, axis=1, inplace=True)
test_features = df_input.columns
print("Columns Not present in test_features: \n")
for x in training_features:
if x not in test_features:
print(x)
print("\nColumns Not present in train_features:\n")
for x in test_features:
if x not in training_features:
print(x)
# Removing below unwanted columns
df_input.drop(
["Exterior1st_NA", "MSZoning_NA", "SaleType_NA", "Exterior2nd_NA"],
axis=1,
inplace=True,
)
x = df_input
y_predict = lasso_reg.predict(x)
y_predict = np.square(y_predict)
df_result["SalePrice"] = pd.Series(y_predict.tolist())
df_result
print("Exporting predicted results")
df_result.to_csv("predict_result.csv", index=False)
# end of file
|
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
from textblob import TextBlob
from wordcloud import WordCloud
tweet_df = pd.read_csv(
"../input/all-covid19-vaccines-tweets/vaccination_all_tweets.csv"
)
tweet_df = tweet_df[["user_location", "date", "text"]]
tweet_df["date"] = pd.to_datetime(tweet_df["date"])
tweet_df = tweet_df.drop_duplicates("text")
tweet_df.isna().sum()
tweet_df["user_location"] = tweet_df["user_location"].fillna(
tweet_df["user_location"].mode()[0]
)
def clean_data(text):
text = re.sub(r"@\w+", "", text)
text = re.sub(r"#", "", text)
text = re.sub(r"RT[\s]+", "", text)
text = re.sub(r"https?:\/\/\S+", "", text)
text = text.lower()
return text
tweet_df["text"] = tweet_df["text"].apply(clean_data)
tweet_df.head()
words = " ".join([word for word in tweet_df["text"]])
word_cloud = WordCloud(
width=1000, height=500, random_state=20, max_font_size=120
).generate(words)
fig, ax = plt.subplots(figsize=(12, 6))
plt.imshow(word_cloud, interpolation="bilinear")
plt.axis("off")
def get_subjectivity(text):
return TextBlob(text).sentiment.subjectivity
def get_polarity(text):
return TextBlob(text).sentiment.polarity
def get_sentiment(score):
if score > 0:
return "Positive"
elif score == 0:
return "Neutral"
else:
return "Negative"
tweet_df["subjectivity"] = tweet_df["text"].apply(get_subjectivity)
tweet_df["polarity"] = tweet_df["text"].apply(get_polarity)
tweet_df["sentiment"] = tweet_df["polarity"].apply(get_sentiment)
tweet_df.head()
def remove_border():
for i in ["top", "right", "bottom", "left"]:
ax.spines[i].set_visible(False)
fig = plt.figure(figsize=(12, 6))
plt.scatter(tweet_df["polarity"], tweet_df["subjectivity"], s=4)
plt.ylabel("Subjectivity")
plt.xlabel("Polarity")
fig = plt.figure(figsize=(12, 6))
sns.catplot(x="subjectivity", y="polarity", data=tweet_df)
|
# 
# # The Iris dataset is a classic machine learning dataset that is often used for classification problems. It is sometimes referred to as the "Hello World" of machine learning because it is a simple and well-understood dataset that is often used for teaching and learning purposes.
# ## The dataset consists of 150 samples, each representing an iris flower. Each sample has four features: sepal length, sepal width, petal length, and petal width. The target variable is the species of the iris flower, which can be one of three classes: setosa, versicolor, and virginica.
# ### Here is a summary of the dataset:
# ### Number of samples: 150
# ### Number of features: 4
# ### Target variable: Species (setosa, versicolor, virginica)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_iris
df = pd.read_csv("/kaggle/input/iris-classifier/iris.csv")
df.head()
df.isnull().sum()
df.shape
df.describe()
# # EDA
X = df.iloc[:, [0, 1, 2, 3]].values
y = df.iloc[:, 4].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.15, random_state=0
)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
X_train, X_cv, y_train, y_cv = train_test_split(X, y, test_size=0.15, random_state=0)
print(X_train.shape)
print(X_cv.shape)
print(y_train.shape)
print(y_cv.shape)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
df.hist(figsize=(8, 8))
corr_matrix = df.corr()
sns.heatmap(corr_matrix, annot=True)
from sklearn.tree import DecisionTreeClassifier
DecisionTree_model = DecisionTreeClassifier()
DecisionTree_model.fit(X_train, y_train)
y_pred = DecisionTree_model.predict(X_test)
df["species"] = df.species.astype("category").cat.codes
df["species"]
from sklearn import metrics
Accuracy = metrics.accuracy_score(y_test, y_pred)
Accuracy
print(y_pred)
print(y_test)
from sklearn.ensemble import RandomForestClassifier
RandomForest_model = RandomForestClassifier(n_estimators=200, max_depth=10)
RandomForest_model.fit(X_train, y_train)
y_pred = RandomForest_model.predict(X_test)
from sklearn import metrics
Accuracy = metrics.accuracy_score(y_test, y_pred)
Accuracy
plt.bar(df["species"], color="maroon", width=0.4, height=10)
sns.pairplot(df[["sepal_length", "sepal_width", "petal_length", "petal_width"]])
sns.pairplot(df, hue="species")
df.sepal_length.plot.density(color="green")
plt.title("density plot for sepal lengths")
plt.show()
|
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from deslib.dcs.ola import OLA
from matplotlib import pyplot
def get_dataset():
X, y = make_classification(
n_samples=10000, n_features=20, n_informative=15, n_redundant=5, random_state=7
)
return X, y
def get_models():
models = dict()
for n in range(2, 22):
models[str(n)] = OLA(k=n)
return models
def evaluate_model(model):
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
scores = cross_val_score(model, X, y, scoring="accuracy", cv=cv, n_jobs=-1)
return scores
X, y = get_dataset()
models = get_models()
results, names = list(), list()
for name, model in models.items():
scores = evaluate_model(model)
results.append(scores)
names.append(name)
print("%s %.3f (%.3f)" % (name, mean(scores), std(scores)))
pyplot.boxplot(results, labels=names, showmeans=True)
pyplot.show()
|
# # Style Transfer with Deep Neural Networks
# Style transfer relies on separating the content and style of an image. Given one content image and one style image, we aim to create a new, target image which should contain our desired content and style components:
# * objects and their arrangement are similar to that of the content image
# * style, colors, and textures are similar to that of the style image
# An example is shown below, where the content image is of a cat, and the style image is of Hokusai's Great Wave. The generated target image still contains the cat but is stylized with the waves, blue and beige colors, and block print textures of the style image!
# 
# ## How does it work?
# Neural style transfer is an optimization technique used to take three images, a content image, a style reference image (such as an artwork by a famous painter), and the input image you want to style — and blend them together such that the input image is transformed to look like the content image, but “painted” in the style of the style image.
# "it is possible to separate the style representation and content representations in a CNN, learnt during a computer vision task."
# Following this concept, the content and style features are first extracted and stored. The style image **a** is passed through the network and its style representations **Al** on all included layers are computed and stored. The content image **p** is passed through the network and the content representation **Pl** in one layer is stored. Then a random white noise image **x** is passed through the network and its style features **Gl** and content features **Fl** are computed. On each layer included in the style representation, the element-wise mean squared difference between the style features **Gl** and **Al** is computed to give the style loss **Ls**.
# Also, the mean squared difference between **Fl** and **Pl** is computed to give the content loss **Lc**. The total loss **Lt** is then a linear combination of the content and style losses. Its derivative with respect to the pixel values can be computed using error back-propagation. This gradient is used to iteratively update the image **x** until it simultaneously matches the style features of the style image **a** and the content features of the content image **p**.
# ## Lets code
# we'll use a pre-trained VGG19 Net to extract content or style features from a passed in image. We'll then formalize the idea of content and style losses and use those to iteratively update our target image until we get a result that we want.
#
# import resources
from PIL import Image
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
import requests
from torchvision import transforms, models
# # Load in VGG19 (features)
# VGG19 is split into two portions:
# * vgg19.features, which are all the convolutional and pooling layers
# * vgg19.classifier, which are the three linear, classifier layers at the end
# you only need the features portion, which you're going to load in and "freeze" the weights of, below.
# get the "features" portion of VGG19 (we will not need the "classifier" portion)
vgg = models.vgg19(pretrained=True).features
# freeze all VGG parameters since we're only optimizing the target image
for param in vgg.parameters():
param.requires_grad_(False)
# move the model to GPU, if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vgg.to(device)
# ### Load in Content and Style Images
def load_image(img_path, max_size=700, shape=None):
if "http" in img_path:
response = requests.get(img_path)
image = Image.open(BytesIO(response.content)).convert("RGB")
else:
image = Image.open(img_path).convert("RGB")
# large images will slow down processing
if max(image.size) > max_size:
size = max_size
else:
size = max(image.size)
if shape is not None:
size = shape
in_transform = transforms.Compose(
[
transforms.Resize(size),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
# discard the transparent, alpha channel (that's the :3) and add the batch dimension
image = in_transform(image)[:3, :, :].unsqueeze(0)
return image
# Next, I'm loading in the content and style images (here from URLs) and forcing the style image to be the same size as the content image.
content_image_ = "https://images.unsplash.com/photo-1426122402199-be02db90eb90?ixlib=rb-1.2.1&ixid=MXwxMjA3fDB8MHxwaG90by1yZWxhdGVkfDh8fHxlbnwwfHx8&auto=format&fit=crop&w=400&q=60"
style_image_ = "https://images.unsplash.com/photo-1583531172005-814191b8b6c0?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=675&q=80"
# load in content and style image
content = load_image(content_image_).to(device)
# Resize style to match content, makes code easier
style = load_image(style_image_, shape=content.shape[-2:]).to(device)
# helper function for un-normalizing an image
# and converting it from a Tensor image to a NumPy image for display
def im_convert(tensor):
"""Display a tensor as an image."""
image = tensor.to("cpu").clone().detach()
image = image.numpy().squeeze()
image = image.transpose(1, 2, 0)
image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))
image = image.clip(0, 1)
return image
# display the images
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
# content and style ims side-by-side
ax1.imshow(im_convert(content))
ax2.imshow(im_convert(style))
# ## VGG19 Layers
# To get the content and style representations of an image, you have to pass it forward through the VGG19 network until you reach the desired layer(s) and then take the output from those layers.
# 
# ### Content and Style Features
def get_features(image, model, layers=None):
# Need the layers for the content and style representations of an image
if layers is None:
layers = {
"0": "conv1_1",
"5": "conv2_1",
"10": "conv3_1",
"19": "conv4_1",
"21": "conv4_2", ## content representation
"28": "conv5_1",
}
features = {}
x = image
# model._modules is a dictionary holding each module in the model
for name, layer in model._modules.items():
x = layer(x)
if name in layers:
features[layers[name]] = x
return features
# ## Gram Matrix
# The output of every convolutional layer is a Tensor with dimensions associated with the batch_size, a depth, d and some height and width (h, w). The Gram matrix of a convolutional layer can be calculated as follows:
# * Get the depth, height, and width of a tensor using batch_size, d, h, w = tensor.size()
# * Reshape that tensor so that the spatial dimensions are flattened
# * Calculate the gram matrix by multiplying the reshaped tensor by its transpose
# Note: You can multiply two matrices using torch.mm(matrix1, matrix2).
def gram_matrix(tensor):
## get the batch_size, depth, height, and width of the Tensor
_, d, h, w = tensor.size()
## reshape it, so we're multiplying the features for each channel
tensor = tensor.view(d, h * w)
## calculate the gram matrix
    gram = torch.mm(tensor, tensor.t())
return gram
# ## Putting it all Together
# Now that we've written functions for extracting features and computing the gram matrix of a given convolutional layer; let's put all these pieces together! We'll extract our features from our images and calculate the gram matrices for each layer in our style representation.
# get content and style features only once before forming the target image
content_features = get_features(content, vgg)
style_features = get_features(style, vgg)
# calculate the gram matrices for each layer of our style representation
style_grams = {layer: gram_matrix(style_features[layer]) for layer in style_features}
# create a third "target" image and prep it for change
# it is a good idea to start off with the target as a copy of our *content* image
# then iteratively change its style
target = content.clone().requires_grad_(True).to(device)
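# ## Losses and the update loop (sketch)
# The notebook excerpt stops here, so the block below is only a minimal, hedged sketch of
# the remaining step described earlier: compute the content loss at conv4_2, the style
# loss from the gram matrices, combine them into a total loss, and update the target
# image. The layer weights, alpha/beta values, learning rate, and step count are
# illustrative assumptions, not values taken from the original notebook.
style_weights = {
    "conv1_1": 1.0,
    "conv2_1": 0.75,
    "conv3_1": 0.2,
    "conv4_1": 0.2,
    "conv5_1": 0.2,
}
content_weight = 1  # alpha
style_weight = 1e6  # beta
optimizer = optim.Adam([target], lr=0.003)
for step in range(1, 301):
    target_features = get_features(target, vgg)
    # content loss: mean squared difference at the content layer
    content_loss = torch.mean(
        (target_features["conv4_2"] - content_features["conv4_2"]) ** 2
    )
    # style loss: weighted mean squared difference between gram matrices
    style_loss = 0
    for layer in style_weights:
        target_feature = target_features[layer]
        _, d, h, w = target_feature.shape
        target_gram = gram_matrix(target_feature)
        layer_style_loss = style_weights[layer] * torch.mean(
            (target_gram - style_grams[layer]) ** 2
        )
        style_loss += layer_style_loss / (d * h * w)
    # total loss is a linear combination of the content and style losses
    total_loss = content_weight * content_loss + style_weight * style_loss
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()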
|
# Here I use a Long Short-Term Memory (LSTM) model to predict the Google stock price. I also use the PyTorch and PyTorch Lightning packages to make it easier and faster to build and train the deep learning model.
# ## 1. Load Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from sklearn.preprocessing import MinMaxScaler
import pytorch_lightning as pl
from torch.utils.data import Dataset, DataLoader
# ## 2. Define a PyTorch Lightning Module
class StockDataset(Dataset):
def __init__(self, data):
self.data = data
self.scaler = MinMaxScaler()
self.data = self.scaler.fit_transform(self.data)
def __len__(self):
return len(self.data) - 1
def __getitem__(self, idx):
x = torch.Tensor(self.data[idx : idx + 1, :-1])
y = torch.Tensor(self.data[idx + 1 : idx + 2, -1])
return x, y
class StockLSTM(pl.LightningModule):
def __init__(self, input_size=4, hidden_size=64, num_layers=2, output_size=1):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.output_size = output_size
self.lstm = nn.LSTM(input_size, hidden_size, num_layers)
self.fc = nn.Linear(hidden_size, output_size)
def forward(self, x):
lstm_out, _ = self.lstm(x)
y_pred = self.fc(lstm_out[:, -1, :])
return y_pred
def training_step(self, batch, batch_idx):
x, y = batch
y_pred = self(x)
loss = nn.functional.mse_loss(y_pred, y)
self.log("train_loss", loss)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
y_pred = self(x)
loss = nn.functional.mse_loss(y_pred, y)
self.log("val_loss", loss)
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.001)
# ## 3. Import the Data
train_data = pd.read_csv(
"/kaggle/input/gooogle-stock-price/Google_Stock_Price_Train.csv"
)
test_data = pd.read_csv("/kaggle/input/gooogle-stock-price/Google_Stock_Price_Test.csv")
train_data.head()
test_data.head()
# ## 4. Data Preprocessing
# ### 4.1 Convert data types
train_data.info()
test_data.info()
# "Date" should be datetime type, "Close" should be float64, and "Volume" should be integer. And "Close", "Volume" look like only number in there. But if we try to convert the column to numeric values, it will cause error.
# # I commented this because I don't want to create an error when I run all of the code at once.
# train_data['Close'] = pd.to_numeric(train_data['Close'])
# train_data['Volume'] = pd.to_numeric(train_data['Volume'])
# test_data['Volume'] = pd.to_numeric(test_data['Volume'])
# # ValueError: Unable to parse string "1,008.64" at position 451
# # ValueError: Unable to parse string "7,380,500" at position 0
# # ValueError: Unable to parse string "1,657,300" at position 0
train_data.replace(",", "", regex=True, inplace=True)
test_data.replace(",", "", regex=True, inplace=True)
train_data["Date"] = pd.to_datetime(train_data["Date"])
test_data["Date"] = pd.to_datetime(test_data["Date"])
train_data["Close"] = pd.to_numeric(train_data["Close"])
train_data["Volume"] = pd.to_numeric(train_data["Volume"])
test_data["Volume"] = pd.to_numeric(test_data["Volume"])
# Verify the result and check whether there are any missing values.
train_data.info()
test_data.info()
train_data.isna().sum()
test_data.isna().sum()
# ### 4.2 Set "Date" to index and Convert datasets to PyTorch tensors
train_data = train_data.set_index("Date")
test_data = test_data.set_index("Date")
# ### 4.3 Normalize the data
# scaler = MinMaxScaler()
# train_data = scaler.fit_transform(train_data)
# test_data = scaler.transform(test_data)
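# (Normalization is already handled inside StockDataset via MinMaxScaler, which is
# presumably why this block is left commented out.)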
# ### 4.4 Prepare the data loaders
train_dataset = StockDataset(train_data)
test_dataset = StockDataset(test_data)
train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=0)
test_dataloader = DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=0)
# ## 5. Train the Model
model = StockLSTM()
trainer = pl.Trainer(max_epochs=1500, gpus=1)
trainer.fit(model, train_dataloader, test_dataloader)
test_obs = []
test_preds = []
model.eval()
with torch.no_grad():
for x, y in test_dataloader:
y_pred = model(x)
test_preds.extend(y_pred.cpu().numpy()[:, 0])
test_obs.extend(y.cpu().numpy())
test_obs = np.array(test_obs)
test_preds = np.array(test_preds)
plt.plot(test_obs, label="Observed")
plt.plot(test_preds, label="Predicted")
plt.legend()
plt.show()
|
# Image Similarity - OpenCV
# <div style="color:white;display:fill;
# background-color:#e38e05;font-size:150%;
# font-family:Nexa;letter-spacing:0.5px">
# 1.1 About SSIM ?
# > * SSIM stands for **Structural Similarity Index Measure**; it focuses on the **visible structure** of an image, which is largely preserved even after color-space conversion
# > * SSIM is used as a **method/metric to measure the similarity between two given images** (a minimal usage sketch follows the application list below)
# <div style="color:white;display:fill;
# background-color:#e38e05;font-size:150%;
# font-family:Nexa;letter-spacing:0.5px">
# 1.2 SSIM Application ?
# > * Object Detection
# > * Object Recognition
# > * Semantic Segmentation
# > * Instance-Based Segmentation
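# > A minimal usage sketch (illustrative only, not part of the original notebook): SSIM of
# > an image with itself is 1.0, and it drops once noise is added to one of the images.
from skimage.metrics import structural_similarity as ssim_metric
import numpy as np
base = np.tile(np.linspace(0, 1, 64), (64, 1))  # simple synthetic gradient image
noisy = np.clip(base + np.random.normal(0, 0.1, base.shape), 0, 1)
print("SSIM(base, base) :", ssim_metric(base, base, data_range=1.0))
print("SSIM(base, noisy):", ssim_metric(base, noisy, data_range=1.0))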
# Libraries:
from skimage.metrics import structural_similarity as ssim
import numpy as np
import matplotlib.pyplot as plt
import cv2
# Define Function of MSE
def mean_squred_error(image01, image02):
    # sum of squared pixel differences, normalized by the image size
    error = np.sum((image01.astype("float") - image02.astype("float")) ** 2)
    error = error / float(image01.shape[0] * image02.shape[1])
    return error
def image_comparision(image01, image02):
m = mean_squred_error(image01, image02)
s = ssim(image01, image02)
print("Mean Squred Error is: {}\nStructural Similarity Index is: {}".format(m, s))
# Read Image
image01 = cv2.imread("../input/catimages/CatImageA/Cat.jpg")
image02 = cv2.imread("../input/catimages/CatImageB/CatB.jpg")
if image01 is None:
print("Failed to load image01")
else:
print("Image 01 load successfully")
# CVT Color Conversion
image01 = cv2.cvtColor(image01, cv2.COLOR_BGR2GRAY)
image02 = cv2.cvtColor(image02, cv2.COLOR_BGR2GRAY)
if image02 is None:
print("Failed to load Image02")
else:
print("Image 02 loaded Successfully")
# print("Image Comparision: \n", image_comparision(image01, image01))
print("Image01")
image_comparision(image01, image01)
image01.shape
image01.shape # Row & Columns [if 182, 251, 125 - row: col: color channel]
image01.shape[
    1::-1
]  # Reversed (width, height) order needed for rotation; this output will be divided to find the center
# image01.shape[1::-1]/2  # a tuple is immutable, so we wrap it in np.array, which allows the division
# Why divide by 2? To get the image center point: 138 is the center of 276 and 91.5 is the center of 183.
# We use the center position for rotation.
np.array(
    image01.shape[1::-1]
) / 2  # the image center (half of width and height), used as the rotation pivot
# Image Augmentation
tuple(
np.array(image01.shape[1::-1]) / 2
) # This tuple conversion will pass to image_center
# > * Augmentation is used here because we effectively work with a single image
# > * After augmentation we get a different image, which is useful for comparison (before vs. after augmentation)
# > * Augmentation would not be mandatory if two genuinely different images had been loaded, since SSIM generally takes two different images for its operation
#
image_angle = 45
image_center = tuple(np.array(image01.shape[1::-1]) / 2)
image_rotation = cv2.getRotationMatrix2D(
image_center, image_angle, 1.0
) # For rotation, first need to find out image center with angle
imageWarp = cv2.warpAffine(
image01, image_rotation, image01.shape[1::-1], flags=cv2.INTER_LINEAR
)
|
import torch
import torch.nn as nn
import pytorch_lightning as pl
import random
from sklearn.model_selection import train_test_split
from pytorch_lightning import Trainer
import torch.nn.utils.rnn as rnn
from pytorch_lightning.loggers.wandb import WandbLogger
import torchmetrics
from torch.optim.lr_scheduler import StepLR
from tensorboard import program
import torch.nn.functional as F
from torch.utils.data import Dataset
from collections import Counter
def tokenize_input(word):
return [ch for ch in word]
def extract_data_from_file(filename):
data = []
with open(filename, "r") as file:
line = file.readline()
while line:
words = line.split()
if any(c.isdigit() for c in words[0]):
line = file.readline()
continue
limit = len(words) if "#" not in words else words.index("#")
data.append((tokenize_input(words[0]), [word for word in words[1:limit]]))
line = file.readline()
return data
def get_vocabularies(data):
input_vocab = Counter()
output_vocab = Counter()
for input, output in data:
for el in input:
input_vocab[el] += 1
for el in output:
output_vocab[el] += 1
return input_vocab, output_vocab
class CMUDataset(Dataset):
def __init__(self, data, input_vocab, output_vocab):
super().__init__()
self.data = data
self.input_vocab = input_vocab
self.output_vocab = output_vocab
self.input_mapping = self.get_input_mapping()
self.output_mapping = self.get_output_mapping()
self.start_token = len(self.input_vocab) + 1
self.end_token = len(self.input_vocab) + 2
self.data_text_to_numeric()
def get_input_vocab_size(self):
return len(self.input_vocab)
def get_output_vocab_size(self):
return len(self.output_vocab)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def get_input_mapping(self):
tokens = list(self.input_vocab.keys())
return dict([(token, idx + 1) for idx, token in enumerate(tokens)])
def get_output_mapping(self):
tokens = list(self.output_vocab.keys())
return dict(
[
(token, idx + len(self.input_vocab) + 3)
for idx, token in enumerate(tokens)
]
)
def data_text_to_numeric(self):
for idx, (inp, out) in enumerate(self.data):
self.data[idx] = ([self.start_token], [self.start_token])
self.data[idx][0].extend(list(map(lambda x: self.input_mapping[x], inp)))
self.data[idx][0].append(self.end_token)
self.data[idx][1].extend(list(map(lambda x: self.output_mapping[x], out)))
self.data[idx][1].append(self.end_token)
class Attention(pl.LightningModule):
def __init__(self, enc_hid_dim, dec_hid_dim):
super().__init__()
self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim)
self.v = nn.Linear(dec_hid_dim, 1, bias=False)
def forward(self, hidden, encoder_outputs):
seq_len = encoder_outputs.shape[1]
hidden = hidden.unsqueeze(1).repeat(1, seq_len, 1)
energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim=2)))
# energy = [batch size, src len, dec hid dim]
attention = self.v(energy).squeeze(2)
# attention= [batch size, src len]
return F.softmax(attention, dim=1)
class PhoneticTranscriptionEncoder(pl.LightningModule):
def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout):
super().__init__()
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.LSTM(
emb_dim, enc_hid_dim, num_layers=2, batch_first=True, bidirectional=True
)
self.dropout = nn.Dropout(dropout)
self.fc_hidden = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
self.fc_cell = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
def forward(self, x):
embedded = self.dropout(self.embedding(x))
outputs, (hidden, cell) = self.rnn(embedded)
hidden = torch.tanh(
self.fc_hidden(torch.cat((hidden[2:4, :, :], hidden[0:2, :, :]), dim=2))
)
cell = torch.tanh(
self.fc_cell(torch.cat((cell[2:4, :, :], cell[0:2, :, :]), dim=2))
)
return outputs, hidden, cell
class PhoneticTranscriptionDecoder(pl.LightningModule):
def __init__(
self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention
):
super().__init__()
self.enc_hid_dim = enc_hid_dim
self.dec_hid_dim = dec_hid_dim
self.output_dim = output_dim
self.attention = attention
self.embedding = nn.Embedding(output_dim, emb_dim)
self.rnn = nn.LSTM(
emb_dim + 2 * enc_hid_dim, dec_hid_dim, num_layers=2, batch_first=True
)
self.fc_out = nn.Linear(emb_dim + dec_hid_dim + enc_hid_dim * 2, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, inp, hidden, cell, encoder_outputs):
inp = inp.unsqueeze(-1)
embedded = self.dropout(self.embedding(inp))
a = self.attention(hidden[-1].squeeze(0), encoder_outputs)
a = a.unsqueeze(1)
weighted = a @ encoder_outputs
output, (hidden, cell) = self.rnn(
torch.cat((embedded, weighted), 2), (hidden, cell)
)
output = output.squeeze(1)
weighted = weighted.squeeze(1)
embedded = embedded.squeeze(1)
prediction = self.fc_out(torch.cat((output, weighted, embedded), dim=1))
return prediction, hidden, cell
BATCH_SIZE = 4
NUM_EPOCHS = 100
LEARNING_RATE = 5e-4
def collate_batch(batch):
input_padded = rnn.pad_sequence(
[torch.tensor(x[0]) for x in batch], batch_first=True
)
output_padded = rnn.pad_sequence(
[torch.tensor(x[1]) for x in batch], batch_first=True
)
return input_padded, output_padded
class PhoneticTranscription(pl.LightningModule):
def __init__(self, encoder, decoder, train_dataset, test_dataset):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.train_dataset = train_dataset
self.test_dataset = test_dataset
self.criterion = nn.CrossEntropyLoss(ignore_index=0)
self.train_accuracy = torchmetrics.Accuracy(
task="multiclass", num_classes=decoder.output_dim
)
self.val_accuracy = torchmetrics.Accuracy(
task="multiclass", num_classes=decoder.output_dim
)
def forward(self, src, trg, teacher_forcing_ratio=0.7):
batch_size = trg.shape[0]
trg_len = trg.shape[1]
trg_vocab_size = self.decoder.output_dim
# tensor to store decoder outputs
outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
enc_outputs, hidden, cell = self.encoder(src)
inp = trg[:, 0]
# hidden = torch.zeros(hidden.shape).to(self.device)
# cell = torch.zeros(cell.shape).to(self.device)
for t in range(1, trg_len):
# insert input token embedding, previous hidden state and the context state
# receive output tensor (predictions) and new hidden state
output, hidden, cell = self.decoder(inp, hidden, cell, enc_outputs)
# place predictions in a tensor holding predictions for each token
outputs[t] = output
# decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
# get the highest predicted token from our predictions
top1 = output.argmax(1)
# if teacher forcing, use actual next token as next input
# if not, use predicted token
inp = trg[:, t] if teacher_force else top1
outputs = torch.permute(outputs, (1, 0, 2))
return outputs
def training_step(self, batch, batch_idx):
src, trg = batch
output = self(src, trg)
output_dim = output.shape[-1]
output = torch.reshape(output[1:], (-1, output_dim))
trg = trg[1:].view(-1)
loss = self.criterion(output, trg)
self.train_accuracy(output, trg)
self.log(
"train_acc",
self.train_accuracy,
on_step=True,
on_epoch=True,
batch_size=BATCH_SIZE,
)
self.log(
"train_loss",
loss.item(),
on_step=True,
on_epoch=True,
batch_size=BATCH_SIZE,
)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
src, trg = batch
output = self(src, trg, 0)
output_dim = output.shape[-1]
output = torch.reshape(output[1:], (-1, output_dim))
trg = trg[1:].view(-1)
loss = self.criterion(output, trg)
self.val_accuracy(output, trg)
self.log(
"val_acc",
self.val_accuracy,
on_step=True,
on_epoch=True,
batch_size=BATCH_SIZE,
)
self.log(
"val_loss", loss.item(), on_step=True, on_epoch=True, batch_size=BATCH_SIZE
)
return {"val_loss": loss}
def train_dataloader(self):
return torch.utils.data.DataLoader(
dataset=self.train_dataset,
batch_size=BATCH_SIZE,
num_workers=2,
shuffle=True,
collate_fn=collate_batch,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
dataset=self.test_dataset,
batch_size=BATCH_SIZE,
num_workers=2,
shuffle=False,
collate_fn=collate_batch,
)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=LEARNING_RATE)
scheduler = StepLR(optimizer, step_size=1, gamma=0.6)
return [optimizer], [scheduler]
# training
if __name__ == "__main__":
torch.manual_seed(42)
data = extract_data_from_file("/kaggle/input/cmudict/cmudict.txt")
train_data, test_data = train_test_split(data, test_size=0.2)
in_voc, out_voc = get_vocabularies(train_data)
train_dataset = CMUDataset(train_data, in_voc, out_voc)
test_dataset = CMUDataset(test_data, in_voc, out_voc)
INPUT_DIM = train_dataset.get_input_vocab_size() + 3
OUTPUT_DIM = INPUT_DIM + train_dataset.get_output_vocab_size() + 1
ENC_EMB_DIM = 128
DEC_EMB_DIM = 128
ENC_HID_DIM = 256
DEC_HID_DIM = 256
ENC_DROPOUT = 0.8
DEC_DROPOUT = 0.8
attn = Attention(ENC_HID_DIM, DEC_HID_DIM)
encoder = PhoneticTranscriptionEncoder(
INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT
)
decoder = PhoneticTranscriptionDecoder(
OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT, attn
)
model = PhoneticTranscription(encoder, decoder, train_dataset, test_dataset)
logger = WandbLogger(project="Phonetic Transcription")
trainer = Trainer(
max_epochs=NUM_EPOCHS,
logger=logger,
fast_dev_run=False,
accelerator="gpu",
devices=1,
gradient_clip_val=1.0,
)
trainer.fit(model)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import torch
if torch.cuda.is_available():
device = torch.device("cuda")
print("We will use the GPU:", torch.cuda.get_device_name(0))
else:
print("No GPU available, using the CPU instead.")
device = torch.device("cpu")
train_data = pd.read_csv("../input/nlp-getting-started/train.csv")
test_data = pd.read_csv("../input/nlp-getting-started/test.csv")
train_data.head()
train_data["keyword"].value_counts()
train_data["keyword"].nunique()
train_data["location"].value_counts()
train_data["text"][4]
import seaborn as sns
import matplotlib.pyplot as plt
ax = sns.countplot(train_data.target)
import string
import re
import nltk
nltk.download("stopwords")
stopwords = nltk.corpus.stopwords.words("english")
# data preprocessing
def clean_text(text):
text = text.lower()
# remove hyperlinks
text = re.sub(r"http\S+", "", text)
    # remove special characters
    text = "".join([word for word in text if word not in string.punctuation])
    text = re.sub(r"\W", " ", str(text))
    # remove stopwords
    text = [word for word in text.split() if word not in stopwords]
    # remove any numeric characters
    text = [word for word in text if re.search(r"\d", word) is None]
# convert split to text again
text = " ".join(word for word in text)
return text
# train_data['text_clean'] = train_data['text'].apply(lambda x: clean_text(x))
train_data["text_clean"] = train_data["text"].apply(clean_text)
test_data["text_clean"] = test_data["text"].apply(clean_text)
train_data.head()
train_data.drop(["id", "keyword", "location", "text"], axis=1)
test_data.head()
test_data.drop(["id", "keyword", "location", "text"], axis=1)
train_data["target"].value_counts()
data = train_data["text_clean"].values
labels = train_data["target"].values
# ELECTRA
from transformers import ElectraTokenizer, ElectraForSequenceClassification, AdamW
import torch
tokenizer = ElectraTokenizer.from_pretrained("google/electra-base-discriminator")
model = ElectraForSequenceClassification.from_pretrained(
"google/electra-base-discriminator", num_labels=2
)
model.cuda()
model
# important to know the max len of each sentence
import matplotlib.pyplot as plt
def plot_sentence_embeddings_length(text_list, tokenizer):
tokenized_texts = list(map(lambda t: tokenizer.tokenize(t), text_list))
tokenized_texts_len = list(map(lambda t: len(t), tokenized_texts))
fig, ax = plt.subplots(figsize=(8, 5))
ax.hist(tokenized_texts_len, bins=60)
ax.set_xlabel("Length of Comment Embeddings")
ax.set_ylabel("Number of Comments")
return max(tokenized_texts_len)
plot_sentence_embeddings_length(data, tokenizer)
a = ["Aish is nice", "jetti loves her"]
# print(list(map(lambda t: tokenizer.tokenize(t), a)))
tokenized_texts = list(map(lambda t: tokenizer.tokenize(t), a))
print(tokenized_texts)
tokenized_texts_len = list(map(lambda t: len(t), tokenized_texts))
print(tokenized_texts_len)
token_lens = []
for txt in data:
tokens = tokenizer.encode(txt, max_length=70)
token_lens.append(len(tokens))
sns.distplot(token_lens)
plt.xlim([0, 40])
plt.xlabel("Token count")
max(token_lens)
# From the graph we can conclude that most tweets have fewer than 30 tokens, so the max_length of 38 used below comfortably covers them.
indices = tokenizer.batch_encode_plus(
data,
max_length=38,
add_special_tokens=True,
return_attention_mask=True,
pad_to_max_length=True,
truncation=True,
)
indices.keys()
input_ids = indices["input_ids"]
attention_masks = indices["attention_mask"]
from sklearn.model_selection import train_test_split
# Use 80% for training and 20% for validation.
train_ids, val_ids, train_labels, val_labels = train_test_split(
input_ids, labels, random_state=42, test_size=0.2
)
# Do the same for the masks.
train_masks, val_masks, _, _ = train_test_split(
attention_masks, labels, random_state=42, test_size=0.2
)
len(train_ids)
# len(train_labels)
# convert data to tensors
train_ids = torch.tensor(train_ids)
val_ids = torch.tensor(val_ids)
train_labels = torch.tensor(train_labels)
val_labels = torch.tensor(val_labels)
train_masks = torch.tensor(train_masks)
val_masks = torch.tensor(val_masks)
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
# TRAINING DATA
train_data = TensorDataset(train_ids, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=32)
len(train_dataloader)
train_iter3 = iter(train_dataloader)
print(type(train_iter3))
print(len(train_iter3))
# Validation Data
val_data = TensorDataset(val_ids, val_masks, val_labels)
val_sampler = RandomSampler(val_data)
val_dataloader = DataLoader(val_data, sampler=val_sampler, batch_size=32)
optimizer = AdamW(
model.parameters(),
lr=6e-6, # args.learning_rate - default is 5e-5, our notebook had 2e-5
eps=1e-8, # args.adam_epsilon - default is 1e-8.
)
from transformers import get_linear_schedule_with_warmup
# Number of training epochs (the authors recommend between 2 and 4; 5 are used here)
epochs = 5
# Total number of training steps is number of batches * number of epochs.
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=0, num_training_steps=total_steps
)
import numpy as np
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
def acc_score(y_pred, y_true):
# correct labels = 0
cor = 0
# loop over all the entries in test data
for i in range(len(y_pred)):
# if predicted = actual label, add 1 to correct labels
if y_pred[i] == y_true[i]:
cor += 1
# return accuracy score
return cor / len(y_pred)
import time
import datetime
def format_time(elapsed):
"""
Takes a time in seconds and returns a string hh:mm:ss
"""
# Round to the nearest second.
elapsed_rounded = int(round((elapsed)))
# Format as hh:mm:ss
return str(datetime.timedelta(seconds=elapsed_rounded))
import random
# This training code is based on the `run_glue.py` script here:
# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128
# Set the seed value all over the place to make this reproducible.
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
# Store the average loss after each epoch so we can plot them.
loss_values = []
for epoch_i in range(epochs):
print("epoch is" + str(epoch_i))
print("training...")
t0 = time.time()
total_loss = 0
model.train()
for step, batch in enumerate(
train_dataloader
): # total steps are 191... runs from step 0 to steps 190
print("step", step)
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
model.zero_grad()
outputs = model(
b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels,
)
loss = outputs[0]
total_loss += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
avg_train_loss = total_loss / len(train_dataloader)
print("avg_train_loss", avg_train_loss)
loss_values.append(avg_train_loss)
print(" Average training loss: {0:.2f}".format(avg_train_loss))
print("Training complete!")
# Validation
model.eval()
pred = []
true = []
eval_acc = 0
nb_eval_steps = 0
for batch in val_dataloader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
with torch.no_grad():
outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
logits = outputs[0]
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to("cpu").numpy()
pred.append(logits)
true.append(label_ids)
temp_eval_acc = flat_accuracy(logits, label_ids)
eval_acc += temp_eval_acc
nb_eval_steps += 1
print(" Accuracy: {0:.2f}".format(eval_acc / nb_eval_steps))
# Combine the predictions for each batch into a single list of 0s and 1s.
flat_predictions = [item for sublist in pred for item in sublist]
flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
# Combine the correct labels for each batch into a single list.
flat_true_labels = [item for sublist in true for item in sublist]
from sklearn.metrics import classification_report
print(classification_report(flat_predictions, flat_true_labels))
# Testing (note: this block re-evaluates the validation set; the unlabeled test set is scored below)
model.eval()
pred = []
true = []
eval_acc = 0
nb_eval_steps = 0
for batch in val_dataloader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
with torch.no_grad():
outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
logits = outputs[0]
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to("cpu").numpy()
pred.append(logits)
true.append(label_ids)
temp_eval_acc = flat_accuracy(logits, label_ids)
eval_acc += temp_eval_acc
nb_eval_steps += 1
print(" Accuracy: {0:.2f}".format(eval_acc / nb_eval_steps))
test_data = test_data.text.values
indices = tokenizer.batch_encode_plus(
test_data,
max_length=38,
add_special_tokens=True,
return_attention_mask=True,
pad_to_max_length=True,
truncation=True,
)
input_ids = indices["input_ids"]
att_mask = indices["attention_mask"]
test_ids = torch.tensor(input_ids)
test_mask = torch.tensor(att_mask)
batch_size = 32
prediction_data = TensorDataset(test_ids, test_mask)
prediction_sampler = SequentialSampler(prediction_data)
prediction_dataloader = DataLoader(
prediction_data, sampler=prediction_sampler, batch_size=batch_size
)
len(test_ids)
model.eval()
predictions = []
for batch in prediction_dataloader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask = batch
with torch.no_grad():
outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
logits = outputs[0]
logits = logits.detach().cpu().numpy()
predictions.append(logits)
flat_predictions = [item for sublist in predictions for item in sublist]
flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
sample_sub = pd.read_csv("../input/nlp-getting-started/sample_submission.csv")
submit = pd.DataFrame(
{"id": sample_sub["id"].values.tolist(), "target": flat_predictions}
)
submit.to_csv("submission.csv", index=False)
submit.head()
|
# # Data Frame Exercise
# # Read the data from the file Salaries Complete
import pandas as pd
df = pd.read_csv("/kaggle/input/teaching-salaries-datset/Salaries Complete.csv")
df.head()
# # Display the first 3 rows
df.head(3)
# # Display the last 4 rows
df.tail(4)
# # Remove the columns (Name) and (ID)
df.drop(["Name", "ID"], axis=1, inplace=True)
df.head()
# # Display the data of Females only
df1 = df[df["Gender"] == "f"]
df1.head()
# # Create a new column which contains the total salary and name it total
df["Total"] = (
df["Transportation"] + df["Housing"] + df["Basic"] + df["Allowance"] + df["Family"]
)
df.head()
# # Find the average of the basic salary
av = df["Basic"].mean()
av
# # Find the person who gets the highest salary
m = df["Total"].max()
p = df[df["Total"] == m]
p
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPRegressor
# Read training set and test set
train_df = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/train.csv")
test_df = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/x_test.csv")
# Select the characteristics and target factors of random forest
features = [
"Tuition_in_state",
"Tuition_out_state",
"Faculty_salary",
"Pell_grant_rate",
"SAT_average",
"ACT_50thPercentile",
"pct_White",
"pct_Black",
"pct_Hispanic",
"pct_Asian",
"Parents_middlesch",
"Parents_highsch",
"Parents_college",
]
target = "Completion_rate"
# Extracting features and target variables from training and test sets
X_train = train_df[features]
y_train = train_df[target]
X_test = test_df[features]
# Create a random forest regression model: set the number of estimators, the maximum tree depth, and the random state
# Train the model on the training set
model = RandomForestRegressor(n_estimators=500, max_depth=20, random_state=4)
model.fit(X_train, y_train)
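# Rank the features by the importance scores learned by the random forest
# and print them from most to least important.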
importance = model.feature_importances_
indices = np.argsort(importance)[::-1]
for f in range(X_train.shape[1]):
print("%2d) %-*s %f" % (f + 1, 30, features[indices[f]], importance[indices[f]]))
# Select the feature columns and the target variable for the random forest
features = [
"Tuition_in_state",
"Tuition_out_state",
"Faculty_salary",
"Pell_grant_rate",
"SAT_average",
"ACT_50thPercentile",
"pct_Black",
"pct_Asian",
"Parents_highsch",
"Parents_college",
]
target = "Completion_rate"
# Extracting features and target variables from training and test sets
X_train = train_df[features]
y_train = train_df[target]
X_test = test_df[features]
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Perform PCA
pca = PCA()
X_train_pca = pca.fit_transform(X_train_scaled)
X_test_pca = pca.transform(X_test_scaled)
# Perform a grid search over the MLP hyperparameters
mlp_param_grid = {
"hidden_layer_sizes": [(64,), (128, 64, 32), (256, 128, 64, 32)],
"activation": ["identity", "logistic", "tanh", "relu"],
"solver": ["adam"],
"alpha": [0.0001, 0.001, 0.01],
"max_iter": [500],
"n_iter_no_change": [10],
}
mlp = MLPRegressor(validation_fraction=0.2, verbose=1)
mlp_grid_search = GridSearchCV(
mlp, mlp_param_grid, cv=5, scoring="neg_mean_squared_error"
)
mlp_grid_search.fit(X_train_pca, y_train.ravel())
best_mlp = mlp_grid_search.best_estimator_
y_pred_mlp = best_mlp.predict(X_test_pca)
# Output CSV file (add ID manually and delete last blank line)
name = ["Completion_rate"]
df = pd.DataFrame(columns=name, data=y_pred_mlp)
print(df)
df.to_csv("submission.csv", index=True, index_label="id")
|
# # Political Donations EDA 💸
# This is a quick starter Exploratory Data Analysis (EDA) of the Political Donations by American Sports Owners Dataset. Although I do use quite a lot of one-liners in this EDA, I think there is enough commenting and analysis for anyone to follow along and understand the code.
# Table of Contents
# Essential Imports
# Data Preprocessing
# Analysis of Individual Donations
# Analysis of Individual Donators
# Analysis of Donations to Specific Political Parties
# If you like this notebook, please give it an upvote! Let's jump right into the analysis.
# # Essential Imports
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
import plotly.offline as py
import math
import itertools
py.init_notebook_mode(connected=True)
# The variable below, ```LOOK_AT```, controls the visualizations done below. If you fork this notebook and would like to visualize more/less per graph, the easiest way to do so is by changing the value of ```LOOK_AT``` below.
LOOK_AT = 10
# # Data Preprocessing
df = pd.read_csv(
"../input/political-donations-by-american-sports-owners/sports-political-donations.csv"
)
df
# One of the most important steps is here to convert each of the columns in the dataset to its appropriate type. Here, we want to convert the column "Amount" to a float so we can utilize the vectorized implementations Pandas DataFrames offer. We do so with the simple one-liner shown below.
if df["Amount"].dtype != "int64":
df["Amount"] = (
(df["Amount"].str.replace("$", "", regex=False))
.str.replace(",", "", regex=False)
.astype("int64")
)
df
# # Analysis of Individual Donations
# Primary Questions
# * What is the distribution of donation values?
# * What are the largest individual donations in terms of amount donated?
# Distribution of Donation Values
amount = df["Amount"]
fig = px.histogram(amount)
fig.update_layout(
barmode="group",
title={
"text": f"Distribution of Donation Values",
"x": 0.5,
"xanchor": "center",
"font": {"size": 20},
},
xaxis_title="Amount, in USD",
yaxis_title="Count",
showlegend=False,
)
fig.show()
print(
"Number of Donations: %d\nMean Donation Value: $%.2f\nMedian Donation Value: $%.2f"
% (len(amount), amount.mean(), amount.median())
)
print(
"\nAdditional Statistical Measures:\nStandard Deviation: $%.3f\nSkew: %.3f\nKurtosis: %.3f\n95%% of Data is Between $%.3f and $%.3f"
% (
amount.std(),
amount.skew(),
amount.kurtosis(),
np.quantile(amount, 0.025),
np.quantile(amount, 0.975),
)
)
# "A general guideline for skewness is that if the number is greater than +1 or lower than –1, this is an indication of a substantially skewed distribution. For kurtosis, the general guideline is that if the number is greater than +1, the distribution is too peaked" (Source).
# Clearly, this distribution has many outliers, and that's confirmed by visual analysis of the plot above. Since we are not necessarily doing predictions with this data, removing these outliers is not necessary, but it is still good to note that **the top donators in this dataset are statistical outliers.**
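# As a minimal illustration of that rule of thumb (not part of the original analysis, and
# assuming only the numpy/pandas imports already made above): a roughly symmetric sample
# has skew near 0, while a heavy-tailed sample has skew well above +1.
rng_demo = np.random.default_rng(0)
print(pd.Series(rng_demo.normal(size=10_000)).skew())  # close to 0
print(pd.Series(rng_demo.lognormal(size=10_000)).skew())  # far above +1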
# Top 10 Individual Donations
donations = df.sort_values("Amount", ascending=False)
fig = px.bar(donations[:LOOK_AT], x="Owner", y="Amount")
fig.update_layout(
barmode="group",
title={
"text": f"Top {LOOK_AT} Individual Donations",
"x": 0.5,
"xanchor": "center",
"font": {"size": 20},
},
yaxis_title="Amount, in USD",
showlegend=False,
)
fig.show()
# Yep, you read that right. San Francisco Giants owner Charles Johnson holds the top 3 donations and accounts for 5 out of the top 10 donations in terms of numerical value (amount donated). He is clearly an outlier in this analysis (if you want to look at more/other owners in your own analysis, feel free to fork the notebook and increase the value of ```LOOK_AT``` and/or drop Charles Johnson from the data entirely).
# # Analysis of Individual Donators
# Primary Questions:
# * Which owner has donated the most TOTAL money?
# * Which owner has donated the most times?
# * Which owner donates, ON AVERAGE, the most money?
# Top Donators by Gross Value Donated
sum_df = df.groupby("Owner").sum().sort_values("Amount", ascending=False)
sum_donated = sum_df["Amount"]
fig = px.bar(sum_donated[:LOOK_AT])
fig.update_layout(
barmode="group",
title={
"text": f"Top {LOOK_AT} Individual Donators, Gross Value",
"x": 0.5,
"xanchor": "center",
"font": {"size": 20},
},
yaxis_title="Amount, in USD",
showlegend=False,
)
fig.show()
# Unsurprisingly, Charles Johnson tops the list again with a whopping **$11.03 MILLION** donated. Let's see what percentage of donated money he claims.
pie_df = sum_df.reset_index()
fig = px.pie(pie_df, values="Amount", names="Owner")
fig.update_traces(textposition="inside")
fig.update_layout(
uniformtext_minsize=12,
uniformtext_mode="hide",
title={
"text": f"Pie Chart of Individual Donators (Total Donated: ${sum_donated.sum()})",
"x": 0.4,
"xanchor": "center",
"font": {"size": 20},
},
)
fig.show()
# Charles Johnson accounts for almost a quarter of all donations! Now, let's take a look at the distribution of individual donators.
amount = df.groupby("Owner").sum()["Amount"]
fig = px.histogram(amount)
fig.update_layout(
barmode="group",
title={
"text": f"Distribution of Donators",
"x": 0.5,
"xanchor": "center",
"font": {"size": 20},
},
xaxis_title="Amount, in USD",
yaxis_title="Count",
showlegend=False,
)
fig.show()
print(
"Number of Unique Donators: %d\nMean Donator: $%.2f\nMedian Donator: $%.2f"
% (len(amount), amount.mean(), amount.median())
)
print(
"\nAdditional Statistical Measures:\nStandard Deviation: $%.3f\nSkew: %.3f\nKurtosis: %.3f\n95%% of Data is Between $%.3f and $%.3f"
% (
amount.std(),
amount.skew(),
amount.kurtosis(),
np.quantile(amount, 0.025),
np.quantile(amount, 0.975),
)
)
# Ok, Charles Johnson is quite obviously an outlier. Let's take a closer look at his donations.
cj_donations = df.loc[df["Owner"] == "Charles Johnson"]
cj_donations
fig = px.histogram(cj_donations["Amount"])
fig.update_layout(
barmode="group",
title={
"text": f"Charles Johnson's Donations",
"x": 0.5,
"xanchor": "center",
"font": {"size": 20},
},
xaxis_title="Amount, in USD",
yaxis_title="Count",
showlegend=False,
)
fig.show()
# It looks like most of his donations are comparatively small, but he has a couple of extremely large donations.
# Number of Donations per Donator
count_donated = df.groupby("Owner").size().sort_values(ascending=False)
fig = px.bar(count_donated[:LOOK_AT])
fig.update_layout(
barmode="group",
title={
"text": f"Top {LOOK_AT} Donators by Number of Donations",
"x": 0.5,
"xanchor": "center",
"font": {"size": 20},
},
yaxis_title="Count",
showlegend=False,
)
fig.show()
# What a surprise, it's Charles Johnson at the top AGAIN. It seems like he really loves to donate to political groups, huh.
fig = px.histogram(count_donated, nbins=200)
fig.update_layout(
barmode="group",
title={
"text": f"Distribution of Number of Donations Per Donator",
"x": 0.5,
"xanchor": "center",
"font": {"size": 20},
},
xaxis_title="Donated Count",
yaxis_title="Count",
showlegend=False,
)
fig.show()
print(
"Mean Donations per Donator: %.3f\nMedian Donations per Donator: %.1f"
% (count_donated.mean(), count_donated.median())
)
print(
"\nAdditional Statistical Measures:\nStandard Deviation: %.3f\nSkew: %.3f\nKurtosis: %.3f\n95%% of Data is Between %d Donation(s) and %d Donation(s)"
% (
count_donated.std(),
count_donated.skew(),
count_donated.kurtosis(),
np.quantile(count_donated, 0.025),
np.quantile(count_donated, 0.975),
)
)
# Most sports team owners in this dataset donate less than 5 times, but there are still quite a lot of statistical outliers (I'm looking at you, Charles Johnson).
# Highest Average Donator
avg_donated = df.groupby("Owner").mean()["Amount"].sort_values(ascending=False)
fig = px.bar(avg_donated[:LOOK_AT])
fig.update_layout(
barmode="group",
title={
"text": f"Top {LOOK_AT} Donators by Average Donation",
"x": 0.5,
"xanchor": "center",
"font": {"size": 20},
},
yaxis_title="Count",
showlegend=False,
)
fig.show()
# No Charles Johnson, very cool.
# # Analysis of Donations to Specific Political Parties
# Let's add the feature "Party" to the mix.
# Primary Questions
# * Which party receives the most donations?
# * Which party receives the most money?
# * Are there any owners that donate to multiple parties?
# Which Party Receives the Most Donations?
df["Party"].unique()
COLOR_MAP = {
"Democrat": "blue",
"Bipartisan": "yellow",
"Republican": "red",
"Bipartisan, but mostly Republican": "orange",
"Bipartisan, but mostly Democratic": "cyan",
"Independent": "green",
}
party_df = pd.DataFrame(
df.groupby("Party").size().sort_values(ascending=False), columns=["Count"]
).reset_index()
fig = px.bar(
party_df, x="Party", y="Count", color="Party", color_discrete_map=COLOR_MAP
)
fig.update_layout(
title={
"text": f"Political Parties Which Receive the Most Donations",
"x": 0.5,
"xanchor": "center",
"font": {"size": 20},
},
yaxis_title="Count",
showlegend=False,
)
fig.show()
# Republicans receive the most donations by a decent margin.
# Which Party Receives the Most Money?
COLOR_MAP = {
"Democrat": "blue",
"Bipartisan": "yellow",
"Republican": "red",
"Bipartisan, but mostly Republican": "orange",
"Bipartisan, but mostly Democratic": "cyan",
"Independent": "green",
}
party_sum_df = (
df.groupby("Party").sum().sort_values("Amount", ascending=False).reset_index()
)
fig = px.bar(
party_sum_df, x="Party", y="Amount", color="Party", color_discrete_map=COLOR_MAP
)
fig.update_layout(
title={
"text": f"Political Parties Which Receive the Most Money",
"x": 0.5,
"xanchor": "center",
"font": {"size": 20},
},
yaxis_title="Amount, in USD",
showlegend=False,
)
fig.show()
# The Republican party also receives the most money from sports owners.
# Polarity of Donations
# We calculate polarity as ```sum - max(donations)```, i.e. the count of all donations that are NOT to the donator's primary (most frequently donated-to) party.
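# For example, a donator with donation counts {Republican: 5, Democrat: 2, Bipartisan: 1}
# has polarity 8 - 5 = 3, i.e. three donations outside their most-donated-to party.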
polar_df = df.groupby(["Owner", "Party"]).size()
mcc_owner = {}
for owner in df["Owner"].unique():
mcc = polar_df[owner].sum() - max(polar_df[owner])
mcc_owner[owner] = mcc
mcc_df = pd.DataFrame.from_dict(
mcc_owner, orient="index", columns=["Value"]
).sort_values("Value", ascending=False)
mcc_df
fig = px.bar(mcc_df[:LOOK_AT])
fig.update_layout(
title={
"text": f"Which Donators Have the Most Polar Donations",
"x": 0.5,
"xanchor": "center",
"font": {"size": 20},
},
yaxis_title="Value",
showlegend=False,
)
fig.show()
# Let's take a look at the person with the most polar donations, Micky Arison.
polar_df["Micky Arison"]
|
import socket
hostname = socket.gethostname()
if hostname.startswith("ug"):
hostnum = hostname.split("ug")[-1]
import sys
sys.stdout = open(f"{hostname}.txt", "w")
from random import randint
import random
import matplotlib.pyplot as plt
import numpy as np
example = "/kaggle/input/tsp-toronto/20230407_03-33_adjMatrix.txt"
fpath = example.split("adjMatrix")[0]
adjMatrix = np.loadtxt(fpath + "adjMatrix" + ".txt")
alls = np.loadtxt(fpath + "all" + ".txt").astype(int)
dropoffs = np.loadtxt(fpath + "dropoffs" + ".txt").astype(int)
pickups = np.loadtxt(fpath + "pickups" + ".txt").astype(int)
def printtsp(lst):
for num in lst:
num = alls[num]
if len(str(num)) == 3:
print("\033[32m" + str(num) + "\033[0m", end=" ") # print in green
else:
print(num, end=" ")
print() # print a new line at the end
def isPickup(x):
return x % 2 == 0
def toPickup(x):
return x - 1 # x & 0xFFFFFFFE
def toDropoff(x):
return x + 1 # x | 1
def cost(route):
cost = 0
for i in range(len(route) - 1):
cost += adjMatrix[route[i]][route[i + 1]]
return cost
import time
def timer(func):
def wrapper(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
print(f"Elapsed_time={end_time - start_time:.2f} seconds")
return result
return wrapper
import statistics
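# Decorator factory: runs the wrapped route-search function sample_count times, then plots
# a histogram of the resulting costs with the mean and +/-1 and +/-2 standard-deviation
# lines, saves the figure, and returns the list of trial results.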
def testDistribution(sample_count, numOfIteration, T):
def decorator(func):
def wrapper(*args, **kwargs):
trials = []
for i in range(sample_count):
trials.append(func(*args, **kwargs))
std = statistics.stdev(trials)
avg = statistics.mean(trials)
plt.clf()
n, bins, patches = plt.hist(trials, color="orange")
print(std, avg)
plt.axvline(avg, color="red", linestyle="dashed", linewidth=2)
t = plt.text(avg + 0.1 * std, np.max(n) * 0.15, f"{avg:.1f}")
t.set_bbox(dict(facecolor="white", alpha=0.7, edgecolor="black"))
plt.axvline(avg + std, color="g", linestyle="dashed", linewidth=1)
plt.text(
avg + std * 1.1, np.max(n) * 0.1, "{:.1f}".format(avg + std), color="g"
)
plt.axvline(avg - std, color="g", linestyle="dashed", linewidth=1)
plt.text(
avg - std * 0.9, np.max(n) * 0.1, "{:.1f}".format(avg - std), color="g"
)
plt.axvline(avg + 2 * std, color="b", linestyle="dashed", linewidth=1)
plt.text(
avg + 2.1 * std,
np.max(n) * 0.05,
"{:.1f}".format(avg + 2 * std),
color="b",
)
plt.axvline(avg - 2 * std, color="b", linestyle="dashed", linewidth=1)
plt.text(
avg - 1.9 * std,
np.max(n) * 0.05,
"{:.1f}".format(avg - 2 * std),
color="b",
)
plt.title(
f"{numOfIteration} iterations, T={T}, std={std:.1f}, avg={avg:.1f}"
)
plt.xlabel("score (kilosec)")
plt.ylabel("freq")
plt.savefig(
f"r_{numOfIteration}iteration_{T}T_{sample_count}samples_{hostname}.png"
)
plt.show()
print(
f"iteration={numOfIteration:<8} T={T:<6} std={std}\tavg={avg}", end="\t"
)
return trials
return wrapper
return decorator
import math
def minCostItem(possible_nexts, current):
minCost = math.inf
minIndex = None
for i in range(len(possible_nexts)):
item = possible_nexts[i]
if item == current:
continue
if minCost > adjMatrix[current][item]:
minCost = adjMatrix[current][item]
minIndex = i
return minIndex
def greedy_route(num_of_points, firstitem):
route = []
possible_nexts = list(range(0, num_of_points, 2))
possible_nexts.remove(firstitem)
route.append(firstitem)
possible_nexts.append(firstitem + 1)
while len(route) < num_of_points:
item = possible_nexts.pop(minCostItem(possible_nexts, route[-1]))
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route
def random_route(num_of_points):
route = []
possible_nexts = list(range(0, num_of_points, 2))
while len(route) < num_of_points:
item = possible_nexts.pop(randint(0, len(possible_nexts) - 1))
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route
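# A stop can only be moved if the pickup/dropoff ordering stays valid:
# a pickup must end up before its dropoff, and a dropoff after its pickup.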
def canMove(item, newLocation, etuor):
if isPickup(item):
if newLocation < etuor[toDropoff(item)]:
return True
else:
if newLocation > etuor[toPickup(item)]:
return True
return False
import math
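# Simulated-annealing style acceptance test: a move that worsens the cost by `delta`
# is still accepted with probability exp(-delta / temperature).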
def FEELING_GOOD(delta, temperature):
if temperature == 0:
return False
return random.random() < math.exp(-delta / temperature)
def somewhat_minCostItem(possible_nexts, current, degree_of_randomness):
row = adjMatrix_indexed[current]
for distance, tspIdx in row:
if tspIdx in possible_nexts and random.random() < degree_of_randomness:
return tspIdx
return possible_nexts[0]
def random_greedy_route(num_of_points, firstitem, degree_of_randomness):
route = []
possible_nexts = list(range(0, num_of_points, 2))
possible_nexts.remove(firstitem)
route.append(firstitem)
possible_nexts.append(firstitem + 1)
while len(route) < num_of_points:
item = somewhat_minCostItem(possible_nexts, route[-1], degree_of_randomness)
possible_nexts.remove(item)
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route
def nth_minCostItem(possible_nexts, current, nth_best):
row = adjMatrix_indexed[current]
for distance, tspIdx in row:
if tspIdx in possible_nexts: # ??
if nth_best == 0:
return tspIdx
nth_best -= 1
return possible_nexts[0]
import random
def random_nth_greedy_route(num_of_points, firstitem, rand_list):
route = []
possible_nexts = list(range(0, num_of_points, 2))
possible_nexts.remove(firstitem)
route.append(firstitem)
possible_nexts.append(firstitem + 1)
while len(route) < num_of_points:
if len(route) in rand_list:
item = nth_minCostItem(possible_nexts, route[-1], 1)
else:
item = nth_minCostItem(possible_nexts, route[-1], 0)
possible_nexts.remove(item)
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route
from copy import deepcopy
def findMin_random_route(
random_func, degree_of_randomness, numTryIteration, plot=False
):
baseline = 125000
min_cost = float("inf")
min_cost_list = []
cost_list = []
new_good_found_counter = 0
for i in range(numTryIteration):
# how_many_seconds = random.randint(1,degree_of_randomness)
# rand_list = random.sample( range(0, len(alls)), how_many_seconds) # generate n unique random numbers from 0 to 10
# route = random_func(len(alls), start_point ,rand_list)#degree_of_randomness
route = random_func(len(alls), 0, degree_of_randomness) # degree_of_randomness
c = cost(route)
cost_list.append(c)
if c < min_cost:
if c < 124000:
new_good_found_counter += 1
good_diff_cheats.insert(0, deepcopy(diff_cheat))
if new_good_found_counter % 2 == 0: #
del good_diff_cheats[-1]
min_cost = c
min_route = route
try:
print(min_cost, i, 0, how_many_seconds)
except:
print(min_cost, "iteration:", i)
print(f"\t {len(diff_cheat)} {diff_cheat}")
min_cost_list.append(min_cost)
if plot:
plt.plot(cost_list)
plt.plot(min_cost_list)
print(min_cost, degree_of_randomness, numTryIteration)
return min_route
diff_cheat = []
good_diff_cheats = []
def smart_somewhat_minCostItem(possible_nexts, current, degree_of_randomness):
global diff_cheat
if random.random() < 0.5:
for diffs in good_diff_cheats:
if len(diffs) > len(diff_cheat):
if (
diff_cheat == diffs[: len(diff_cheat)]
and diffs[len(diff_cheat)][0] == current
):
diff_cheat.append(diffs[len(diff_cheat)])
return diffs[len(diff_cheat) - 1][1]
row = adjMatrix_indexed[current]
this_is_nth_best = 0
for distance, tspIdx in row:
if tspIdx in possible_nexts:
if random.random() < degree_of_randomness:
if this_is_nth_best > 0:
diff_cheat.append([current, tspIdx])
return tspIdx
this_is_nth_best += 1
return possible_nexts[0]
def smart_random_greedy_route(num_of_points, firstitem, degree_of_randomness):
global diff_cheat
diff_cheat.clear()
route = []
possible_nexts = list(range(0, num_of_points, 2))
possible_nexts.remove(firstitem)
route.append(firstitem)
possible_nexts.append(firstitem + 1)
while len(route) < num_of_points:
item = smart_somewhat_minCostItem(
possible_nexts, route[-1], degree_of_randomness
)
try:
possible_nexts.remove(item)
except:
print("route", route)
print("diff_cheat", diff_cheat)
print("good_diff_cheats", good_diff_cheats)
print("returned ======", route[-1], item)
            raise RuntimeError(f"item {item} not in possible_nexts")  # abort so the inconsistent state can be inspected
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route
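# Precompute, for every node, its row of the adjacency matrix sorted by distance,
# so the greedy helpers can scan candidate next stops from nearest to farthest.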
adjMatrix_indexed = [[0 for j in adjMatrix] for i in adjMatrix]
for r, row in enumerate(adjMatrix):
for c, item in enumerate(row):
adjMatrix_indexed[r][c] = (item, c)
adjMatrix_indexed[r].sort()
# adjMatrix_indexed[317].insert(0, (0.01, 57))
# findMin_random_route(random_greedy_route, 0.99, 100, True)  # 118781
def update_costs(change_list):
global adjMatrix_indexed
cost = None
for i, j in change_list:
adjMatrix_indexed[i].insert(0, (cost, j))
# cheat = [(42, 266), (332, 168), (224, 277), (80, 215), (63, 131), (160, 88)]#[115329
# update_costs(cheat)
baseline = greedy_route(len(alls), 0)
smart_greedy_route = findMin_random_route(
smart_random_greedy_route, 0.97, 1000, True
) # 108
def experiment(_sample_count=100, _numOfIteration=10, _T=0.99):
@timer
@testDistribution(sample_count=_sample_count, numOfIteration=_numOfIteration, T=_T)
def inside():
# simulated_annealing(10,0)
return findMin_random_route(random_greedy_route, _T, _numOfIteration)
inside()
test_randomness = [
0.9,
0.91,
0.92,
0.93,
0.94,
0.95,
0.96,
0.97,
0.98,
0.985,
0.99,
0.995,
0.999,
0.9995,
]
print(len(test_randomness))
def highlight_differences(list1, list2):
# Ensure that the two lists have the same length
if len(list1) != len(list2):
print("Error: The two lists must have the same length.")
return
# Loop over the elements of the two lists and compare them
for i in range(len(list1)):
if list1[i] == list2[i]:
# The elements are the same, print them normally
print(list1[i], end=" ")
else:
# The elements are different, print them in green
print("\033[42m" + str(list1[i]) + "\033[0m", end=" ")
print() # Print a newline at the end
def twoswap(route, temperature):
etuor = {}
for i, item in enumerate(route):
etuor[item] = i
cur_cost = cost(route)
# printtsp(route)
item = randint(0, len(route) - 1)
loc1 = etuor[item]
# print("item",alls[item], "located at",etuor[item])
if isPickup(item):
d = toDropoff(item)
# print("drop off located at", etuor[d])
possibleSwaps = range(0, etuor[d])
else: # is drop off
p = toPickup(item)
possibleSwaps = range(etuor[p] + 1, len(route))
# print("pickup located at", etuor[p])
for loc2 in possibleSwaps:
# print(loc1, loc2)
if canMove(route[loc2], loc1, etuor):
if loc1 != loc2:
route[loc1], route[loc2] = route[loc2], route[loc1]
return ###
delta = cost(route) - cur_cost
if delta > 0 and not FEELING_GOOD(delta, temperature): # bad
route[loc1], route[loc2] = route[loc2], route[loc1]
break
def shift(route):
pass
@timer
def individual_2opt(count_of_swaps, numOfIteration, route):
costs = []
mincost = cost(route)
for x in range(numOfIteration):
new_route = deepcopy(route)
for i in range(count_of_swaps):
twoswap(new_route, 0)
new_cost = cost(new_route)
costs.append(new_cost)
delta = new_cost - mincost
if delta < 0 or FEELING_GOOD(delta, 5):
mincost = new_cost
print("FOUND", new_cost)
route = new_route
print(count_of_swaps, numOfIteration, mincost, min(costs))
plt.plot(costs)
plt.title(f"randomly performing {count_of_swaps} swaps")
plt.xlabel("Iteration")
plt.ylabel("Cost")
plt.show()
plt.savefig(f"{numOfIteration}iteration_{count_of_swaps}count_of_swaps.png")
# individual(30,1000,random_route(len(alls)))#136191.12329999983
# individual(1,10000,greedy_route(len(alls), 4))#136191.12329999983
test_route = deepcopy(smart_greedy_route)
print(cost(test_route))
individual_2opt(3, 1000 * 1000 * 10, test_route)
# print(test_route)
twoswap(test_route, 0)
highlight_differences(test_route, smart_greedy_route)
def random_choice(pdf):
items, relative_probabilities = zip(*pdf)
total = np.sum(relative_probabilities)
r = random.uniform(0, 1) * total
s = 0
for item, prob in pdf:
s += prob
if s >= r:
break
return item
# pdf = [(0.1, 1), (0.05, 2), (0.05, 3)]
# dist_pdf = sorted(random_choice2(pdf, [2,3,4]) for _ in range(10000))
# plt.hist(dist_pdf)
def random_choice2(pdf, possible_selections):
# pdf = [(0.1, 1), (0.05, 2), (0.05, 3)]
pdf = [pair for pair in pdf if (pair[1] in possible_selections)]
relative_probabilities, items = zip(*pdf)
total = np.sum(relative_probabilities)
r = random.uniform(0, 1) * total
s = 0
for prob, item in pdf:
s += prob
if s >= r:
break
return item
adjMatrix_distribution = [[0 for j in adjMatrix] for i in adjMatrix]
for r, row in enumerate(adjMatrix):
for c, item in enumerate(row):
adjMatrix_distribution[r][c] = (item, c)
del adjMatrix_distribution[r][r]
def convert_to_probability(example):
costs, tspidxs = zip(*example)
avg_1 = np.mean(costs)
def cost_to_probability_weight(cost, first, average):
# return 1/((cost-first+average/100))
steepness = 100
return np.exp(-(cost / average) * steepness) # 1/(2**cost)
first = np.min(costs)
averagecost = np.mean(costs)
probabilities = [cost_to_probability_weight(c, first, averagecost) for c in costs]
total_probability = np.sum(probabilities)
probabilities = [p / total_probability for p in probabilities]
return {idx: prob for prob, idx in zip(probabilities, tspidxs)}
probability_distribution = [convert_to_probability(i) for i in adjMatrix_distribution]
def probabilistic_somewhat_minCostItem(possible_nexts, current):
pdf = probability_distribution[current]
return random_choice2(list(zip(pdf.values(), pdf.keys())), possible_nexts)
def probabilistic_absolutelygreedy_minCostItem(possible_nexts, current):
pdf = probability_distribution[current]
pdf = list(zip(pdf.values(), pdf.keys()))
pdf = [pair for pair in pdf if (pair[1] in possible_nexts)]
sorted_pdf = sorted(pdf)
return sorted_pdf[-1][1]
def probabilistic_random_greedy_route(num_of_points, firstitem, minFunc):
route = []
possible_nexts = list(range(0, num_of_points, 2))
possible_nexts.remove(firstitem)
route.append(firstitem)
possible_nexts.append(firstitem + 1)
choices = [] # from, to
while len(route) < num_of_points:
item = minFunc(possible_nexts, route[-1])
choices.append([route[-1], item])
possible_nexts.remove(item)
route.append(item)
if isPickup(item):
possible_nexts.append(item + 1)
return route, choices
greedy = greedy_route(len(alls), 0)
current_best_cost = cost(greedy)
baseline = 136000
def test_probability_distribution():
# sorted_dict = list(sorted(probability_distribution[153].items(), key=lambda x: x[1]))
# pos = [12, 40, 58, 66, 78, 86, 96, 100, 106, 122, 128, 132, 134, 136, 144, 154, 162, 186, 188, 190, 208, 210, 240, 264, 282, 288, 294, 296, 310, 320, 324, 326, 336, 340, 342, 346, 81, 35, 29, 169, 309, 263, 181, 27, 183, 249, 33, 47, 237, 53, 333, 335, 227, 349, 267, 37, 69, 177, 223, 103, 91, 141, 173, 329, 25, 127, 299, 105, 239, 75, 9]
# sorted_dict = [_ for _ in sorted_dict if _[0] in pos]
# 114956
# for fromIdx, toIdx in [(42, 266), (332, 168), (256, 300), (207, 248), (224, 277), (283, 74), (60, 152), (140, 71), (50, 157), (209, 175), (184, 281), (315, 239), (15, 49)]:
# diff = (1-probability_distribution[fromIdx][toIdx])*0.3 + 0.1
# #print(diff, end=",")
# probability_distribution[fromIdx][toIdx] +=diff
cost_list = []
testroute, choices = probabilistic_random_greedy_route(
len(alls), 0, probabilistic_somewhat_minCostItem
)
current_best_cost = cost(testroute)
for i in range(100):
print(i, end=" ")
testroute, choices = probabilistic_random_greedy_route(
len(alls), 0, probabilistic_somewhat_minCostItem
)
new_cost = cost(testroute)
cost_list.append(new_cost)
if new_cost < baseline:
ratio_delta_cost = baseline / new_cost - 1
k = 3
# print("updating",new_cost,"|",ratio_delta_cost*3,end=" ")
# for fromIdx, toIdx in choices:
# probability_distribution[fromIdx][toIdx] +=(2-probability_distribution[fromIdx][toIdx])*ratio_delta_cost*3
if new_cost < current_best_cost:
ratio_delta_cost = current_best_cost / new_cost - 1
print("\nnewbest", new_cost, ratio_delta_cost, end="\n\t")
for fromIdx, toIdx in choices:
diff = (
2 - probability_distribution[fromIdx][toIdx]
) * ratio_delta_cost + 0.1
# print(diff, end=",")
probability_distribution[fromIdx][toIdx] += diff
current_best_cost = new_cost
# testroute, choices = probabilistic_random_greedy_route(len(alls),0, probabilistic_absolutelygreedy_minCostItem)
# print(cost(testroute))
plt.plot(cost_list)
# dist_pdf = [random_choice2(probability_distribution[0], _a) for _ in range(1000)]
# _dict = {}
# import pandas
# from collections import Counter
# letter_counts = Counter(dist_pdf)
# total_count = sum(letter_counts.values())
# df = pandas.DataFrame.from_dict(letter_counts, orient='index').sort_values(by=0, ascending=False)
# df = df / total_count * 100 # convert counts to percentage frequency
# df.plot(kind='bar')
# _a, _b = zip(*probability_distribution[2])
# plt.plot(_a)
# print(_a[:20])
# print(_b)
# print("time:",[ _ for _ in adjMatrix_distribution[2] if _[1] in _b][:20])
|
# **Create Random Number Array**
import numpy as np
# **Using rand method**
a = np.random.rand(10)
a
b = np.random.rand(6) # library -> module -> method
b
# **Know the data type using dtype attribute**
n = np.array([1, 2, 3, 4], dtype=float)
n.dtype
n
# **Complex Array**
c = np.array([1 + 2j, 3 + 4j])
c.dtype
# **Boolean Array**
b = np.array([[True, False], [True, True]])
b.dtype
# **Indexing the number**
a = np.arange(10, 20)
a
a[1]
a[2]
a[-1]
b = np.eye(3)
b
b[1, 1]
# **Assignment**
a
a[-1] = 190
a
b
b[0, 1] = 5
b
# **Slicing the array**
a
a[1:5] # a[start_index : last_index(exclusive) : step]
a[0:8:2]
import numpy as np
a = np.arange(10)
a
b = a[::2]
b # b is a view of a
b[2] = 14
b
a
# **shares_memory() method**
np.shares_memory(a, b)
# **Copy Method**
a
c = a[::2].copy()
c
c[2] = 4
c
a
# **Boolean Indexing** - Arrays can also be indexed using arrays (also known as fancy indexing)
# Assignment - filter the array with only odd numbers.
import numpy as np
a = np.arange(20)
a
mask = a % 2 != 0
# mask is a boolean array, i.e. it contains only True and False; the result keeps
# every value whose corresponding mask entry is True
a[mask]
# **Integer Indexing** - Array can also be indexed using integer arrays.
r = np.random.randint(20, 50, 10)
r
r[[0, 2, 4]]
# **Basic operation in Array**
# Addition, Substraction, Multiplication, power etc.
a = np.array([1, 2, 3, 4])
a * 2  # the multiplication is broadcast to every element
b = np.array([5, 6, 7, 8])
a * b # it will perform element wise operation
# Other operations - e.g. sin, log, exp
a
np.sin(a)
np.log(a)
np.exp(a)
# Element-wise multiplication - the arrays must have the same shape; corresponding elements are multiplied
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a
b = np.eye(3)
b
a * b
# **Dot method () - #dot product**
# a = [a1,b1,c1]
# b = [a2,b2,c2]
# Dot product = a1* a2 + b1 * b2 + c1 * c2
# **1D Array**
a = np.array([1, 2, 3])
b = np.array([1, 3, 3])
a.dot(b) # 1*1 + 2*3 + 3*3
# **2D Array**
# **Number of columns of a = Number of rows of b**
# (p,q)·(r,s) requires q = r
# The resultant matrix has shape (p,s)
a = np.array([[1, 2, 3], [4, 5, 6]])
b = np.array([[10, 11], [20, 21], [30, 31]])
a.dot(b)
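# Quick shape check (illustrative): the (2, 3) matrix dotted with the (3, 2) matrix above
# yields a (2, 2) result, i.e. rows of a by columns of b.
a.dot(b).shape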
# **Array Comparison**
import numpy as np
a = np.array([12, 45, 36, 78])
b = np.array([12, 75, 3, 7])
mask = a == b # element wise comparison
a[mask]
# array_equal() method
np.array_equal(a, b)
# **Logical Operator**
c = np.array([1, 0, 0, 1])
d = np.array([1, 0, 1, 0])
np.logical_and(c, d)
np.logical_or(c, d)
# **Some other computation**
# **1-D Array**
a = np.array([1, 2, 3, 4, 5])
np.sum(a)
np.mean(a)
np.min(a)
np.max(a)
np.argmin(a) # return the index of the minimum value
np.argmax(a)
np.median(a)
np.std(a)
# **2D-Array**
a = np.array([[1, 7, 5], [3, 6, 1]])
a
c = np.sum(a, axis=0)
c.ndim
np.sum(c)
np.sum(a, axis=1)
np.min(a, axis=0)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv(r"/kaggle/input/vgchartzdata-2023/Game_DataBGT.csv")
df1 = pd.read_excel(
"/kaggle/input/vgchartzdata-2023/Game_DataBGT_Genre.xlsx"
) # Main Dataset
df1.head()
df1.columns
## Converting Genre values from Boolean to Numeric
df1 = df1.astype(
{
"Action": "int",
"Action": "int",
"Strategy": "int",
"Puzzle": "int",
"Shooter": "int",
"Platformer": "int",
"Simulation": "int",
"Sports": "int",
"Arcade": "int",
"Massively Multiplayer": "int",
"Indie": "int",
"Adventure": "int",
"Racing": "int",
"RPG": "int",
"Casual": "int",
"Fighting": "int",
"Board Games": "int",
"Educational": "int",
"Family": "int",
"Card": "int",
"Singleplayer": "int",
"Multiplayer": "int",
"FPS": "int",
"Third Person": "int",
"Cooperative": "int",
"PVP": "int",
}
)
df1.head()
df1["Metascore"] = df1["Metascore"].replace(
"None",
)
df1["Metascore"] = pd.to_numeric(df1["Metascore"])
df1.head()
# Imputing missing values using the K-Nearest Neighbors (KNN) method
print("Data Before Imputation :")
df1.head()
df1.info()
# Dropping columns containing categorical values, since the KNN imputer works only with numerical values
df3 = df1.drop(
[
"Game",
"Publisher",
"Developer",
"Release Date",
"Last Update",
"Alltags",
"Genre",
"Console",
],
axis=1,
)
df3.info()
from sklearn.impute import KNNImputer
imputer = KNNImputer(n_neighbors=5)
imputer.fit(df3)
# The KNN imputer returns an array, so convert it back to a DataFrame
imputed = pd.DataFrame(imputer.transform(df3), columns=df3.columns)
imputed["Pos"] = imputed["Pos"].astype(int)
imputed
# (['Game','Publisher','Developer','Release Date','Last Update','Alltags','Genre','Console']
imputed.insert(1, "Game", df1["Game"])
imputed.insert(2, "Publisher", df1["Publisher"])
imputed.insert(3, "Developer", df1["Developer"])
imputed.insert(9, "Release Date", df1["Release Date"])
imputed.insert(10, "Last Update", df1["Last Update"])
imputed.insert(11, "Alltags", df1["Alltags"])
imputed.insert(12, "Console", df1["Console"])
imputed.info()
# Creating a new column "Is_Update" to check whether having received an update correlates with Total_SS.
imputed["Is_Update"] = np.where(imputed["Last Update"].isna(), 0, 1)
imputed.head(100)
fig, ax = plt.subplots(figsize=(15, 10))
dataplot = sns.heatmap(imputed.corr(), cmap="YlGnBu", annot=True, annot_kws={"size": 6})
# df1[[ 'VGChartz Score', 'Critic Score', 'User Score', 'Metascore' ]] = df[[ 'VGChartz Score', 'Critic Score', 'User Score', 'Metascore' ]].fillna(df[[ 'VGChartz Score', 'Critic Score', 'User Score', 'Metascore' ]].mean())
# df1.head(100)
# df['Total_SS'] = df['Total_SS'].astype(int)
# Critic score vs metascore
x = imputed.drop(
[
"Game",
"Publisher",
"Developer",
"Release Date",
"Last Update",
"Alltags",
"Console",
"Total_SS",
],
axis=1,
)
x = x.astype(
{
"Action": "int",
"Action": "int",
"Strategy": "int",
"Puzzle": "int",
"Shooter": "int",
"Platformer": "int",
"Simulation": "int",
"Sports": "int",
"Arcade": "int",
"Massively Multiplayer": "int",
"Indie": "int",
"Adventure": "int",
"Racing": "int",
"RPG": "int",
"Casual": "int",
"Fighting": "int",
"Board Games": "int",
"Educational": "int",
"Family": "int",
"Card": "int",
"Singleplayer": "int",
"Multiplayer": "int",
"FPS": "int",
"Third Person": "int",
"Cooperative": "int",
"PVP": "int",
"VGChartz Score": "int",
"User Score": "int",
"Critic Score": "int",
"Metascore": "int",
}
)
y = pd.DataFrame(imputed["Total_SS"])
y["Total_SS"] = y["Total_SS"].astype("int")
# imputed=pd.DataFrame(imputer.transform(df3), columns=df3.columns)
x.info()
y.info()
#
# from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=24
)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
X_train.info()
# ML Model - 1 - LOGISTIC REGRESSION
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
# Fit the Algorithm (lbfgs solver with a high iteration cap so the solver converges)
LR = LogisticRegression(solver="lbfgs", max_iter=50000)
LR.fit(X_train, y_train)
# Predict on the model
y_pred_LR = LR.predict(X_test)
y_pred_LR
Acc_LR = accuracy_score(y_test, y_pred_LR)
print("Accuracy of Logistic Regression :", Acc_LR)
report = classification_report(y_test, y_pred_LR)
report
CM_LR = confusion_matrix(y_test, y_pred_LR)
CM_LR
# ML Model - 2 - LIGHT GBM
from lightgbm import LGBMClassifier
LGBM = LGBMClassifier()
LGBM.fit(X_train, y_train)
y_pred_LGBM = LGBM.predict(X_test)
y_pred_LGBM
Acc_LGBM = accuracy_score(y_test, y_pred_LGBM)
print("Accuracy of LightGBM Classifier :", Acc_LGBM)
report = classification_report(y_test, y_pred_LGBM)
report
CM_LGBM = confusion_matrix(y_test, y_pred_LGBM)
CM_LGBM
# ML Model - 3 - GAUSSIAN NAIVE BAYES MODEL
from sklearn.naive_bayes import GaussianNB
GNB = GaussianNB()
# Fit the Algorithm
GNB.fit(X_train, y_train)
# Predict on the model
y_pred_GNB = GNB.predict(X_test)
y_pred_GNB
Acc_GNB = accuracy_score(y_test, y_pred_GNB)
print("Accuracy of GaussianNB :", Acc_GNB)
report = classification_report(y_test, y_pred_GNB)
report
CM_GNB = confusion_matrix(y_test, y_pred_GNB)
CM_GNB
# Model 4 - Linear Regression
# ML Model - 1 Implementation
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
# Fit the Algorithm
lm.fit(X_train, y_train)
# Predict on the model
lm.score(X_test, y_test)
y_pred = lm.predict(X_test)
plt.scatter(y_test, y_pred)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=10)
knn.fit(X_train, y_train)
knn.score(X_test, y_test)
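# Search for a better k: refit the classifier for k = 1..19 and track the
# misclassification rate on the test set so the elbow can be read off the plot below.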
error_rate = []
for i in range(1, 20):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred_i = knn.predict(X_test)
error_rate.append(np.mean(pred_i != y_test))
plt.figure(figsize=(10, 6))
plt.plot(
range(1, 20),
error_rate,
color="blue",
linestyle="dashed",
marker="o",
markerfacecolor="red",
markersize=5,
)
plt.title("Error Rate vs. K Value")
plt.xlabel("K")
plt.ylabel("Error Rate")
from sklearn.metrics import classification_report, confusion_matrix
pred = knn.predict(X_test)
classification_report(y_test, pred)
matrix = confusion_matrix(y_test, pred)
matrix
# ML Model - 3 Implementation
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
# Fit the Algorithm
dtree.fit(X_train, y_train)
# Predict on the model
dtree.score(X_test, y_test)
pred = dtree.predict(X_test)
classification_report(y_test, pred)
matrix = confusion_matrix(y_test, pred)
matrix
|
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
sns.set_style("darkgrid")
from tqdm.notebook import tqdm
tqdm.pandas()
from catboost import CatBoostClassifier, CatBoostRegressor
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import (
train_test_split,
KFold,
StratifiedKFold,
cross_val_score,
RepeatedStratifiedKFold,
)
from sklearn import metrics
from lightgbm import LGBMClassifier
from tsfresh import extract_features, extract_relevant_features, select_features
from tsfresh.feature_extraction import MinimalFCParameters, ComprehensiveFCParameters
from tsfresh import select_features
from tsfresh.utilities.dataframe_functions import impute
df = pd.read_csv("../input/jobathon-analytics-vidhya/train.csv")
tdf = pd.read_csv("../input/jobathon-analytics-vidhya/test.csv")
df["train"] = 1
tdf["train"] = 0
df.head()
df = pd.concat([df, tdf])
df.isnull().sum()
df[df["Reco_Insurance_Type"] == "Joint"].Is_Spouse.value_counts()
pd.Series(
np.logical_and(df.Is_Spouse == "Yes", df["Reco_Insurance_Type"] == "Joint")
).value_counts()
target_col = "Response"
cat_cols = [
"City_Code",
"Region_Code",
"Accomodation_Type",
"Reco_Insurance_Type",
"Is_Spouse",
"Health Indicator",
"Holding_Policy_Duration",
"Holding_Policy_Type",
"Reco_Policy_Cat",
"is_joint_and_spouse",
]
reg_cols = ["Reco_Policy_Premium", "Upper_Age", "Lower_Age"]
imputed_reg_cols = ["age_diff"]
def dframe_expand(dframe):
dframe["age_diff"] = dframe["Upper_Age"] - dframe["Lower_Age"]
dframe["Reco_Policy_Premium"] = (
dframe["Reco_Policy_Premium"] / dframe["Reco_Policy_Premium"].max()
)
dframe["age_diff_avg"] = dframe["age_diff"] / dframe["age_diff"].max()
dframe["Upper_Age_avg"] = dframe["Upper_Age"] / dframe["Upper_Age"].max()
dframe["Lower_Age_avg"] = dframe["Lower_Age"] / dframe["Lower_Age"].max()
dframe["age_diff_max"] = dframe["age_diff"].max()
dframe["Upper_Age_max"] = dframe["Upper_Age"].max()
dframe["Lower_Age_max"] = dframe["Lower_Age"].max()
dframe["age_diff_min"] = dframe["age_diff"].min()
dframe["Upper_Age_min"] = dframe["Upper_Age"].min()
dframe["Lower_Age_min"] = dframe["Lower_Age"].min()
dframe["is_joint_and_spouse"] = pd.Series(
np.logical_and(
dframe.Is_Spouse == "Yes", dframe["Reco_Insurance_Type"] == "Joint"
)
)
# # dframe = dframe.set_index("ID", drop=False)
# dframe.index.name = "index"
# X = dframe[dframe.train == 1].drop(columns=cat_cols + ["train"] + [target_col], axis=1)
# y = dframe[dframe.train == 1][target_col]
# relevant_features = extract_relevant_features(
# X, y,
# column_id='ID', show_warnings=False,
# disable_progressbar=False,
# default_fc_parameters=MinimalFCParameters(),
# n_jobs=0)
# df_extra = extract_features(
# dframe.drop(columns=cat_cols + ["train"] + [target_col], axis=1),
# column_id='ID', show_warnings=False,
# impute_function=impute, disable_progressbar=False,
# default_fc_parameters=MinimalFCParameters(),
# n_jobs=0)
# print(dframe.shape)
# print(df_extra.columns)
# print(relevant_features.columns)
# for col in relevant_features.columns:
# dframe[col] = df_extra[col]
# print(dframe.shape)
return dframe
df = dframe_expand(df)
df
target_encoder = LabelEncoder()
label_encoders = {}
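# Label-encode each categorical column, filling missing values with the string "nan";
# with test=True the encoder fitted on the training data is reused instead of refit.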
def train_encoder(dframe, col, test=False):
if test:
dframe[col] = label_encoders[col].transform(
dframe[col].fillna("nan").astype(str)
)
else:
label_encoders[col] = LabelEncoder()
dframe[col] = label_encoders[col].fit_transform(
dframe[col].fillna("nan").astype(str)
)
for col in tqdm(cat_cols):
train_encoder(df, col)
df.dropna(axis=1, how="all", inplace=True)
df.dtypes
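# Hyperparameter grid for CatBoost (learning rate, tree depth, number of estimators);
# the model below uses AUC as the evaluation metric with iteration-based early stopping.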
grid = {
"learning_rate": [0.1], # [.05, 0.1, .2],
"max_depth": [10, 12, 14],
"n_estimators": [600, 400, 500, 550],
}
model = CatBoostClassifier(
random_state=22,
task_type="GPU",
devices="0:1",
eval_metric="AUC",
thread_count=3,
cat_features=cat_cols,
custom_metric=["AUC:hints=skip_train~false"],
metric_period=50,
od_type="Iter",
od_wait=10,
loss_function="Logloss",
)
df.shape
grid_search_result = model.grid_search(
grid,
X=df[df.train == 1].drop(["train", target_col], axis=1),
y=df[df.train == 1][target_col],
cv=4,
shuffle=True,
stratified=True,
verbose=False,
plot=True,
refit=True,
)
grid_search_result
result = pd.DataFrame(
{
"ID": df[df.train == 0].ID,
"Response": model.predict(
df[df.train == 0].drop(["train", target_col], axis=1)
),
}
)
result.head()
result.to_csv("submission.csv", index=False)
from IPython.display import FileLink
FileLink("submission.csv")
result.Response.value_counts() / result.shape[0] * 100
df[df.train == 1].Response.value_counts() / df[df.train == 1].shape[0] * 100
df[df.train == 1].Response.value_counts()
|
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
df = pd.DataFrame(
{
"Year": [
2012,
2012,
2012,
2012,
2012,
2012,
2012,
2012,
2012,
2012,
2012,
2012,
2013,
2013,
2013,
2013,
2013,
2013,
2013,
2013,
2013,
2013,
2013,
2013,
],
"Month": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
],
"Period": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
],
"Claims": [
10159,
11175,
12310,
12446,
13213,
16412,
17405,
14233,
14606,
12969,
13980,
14755,
12300,
13224,
13606,
13659,
16442,
17334,
19605,
18997,
15971,
15740,
16919,
18931,
],
}
)
df.head()
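# Build a simple lag-1 (autoregressive) feature: each month's claims are predicted
# from the previous month's claims, so the first row with no lag value is dropped.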
df["Claims_shifted"] = df["Claims"].shift(1)
df = df.dropna()
df.head()
X = np.array(df["Claims_shifted"]).reshape(-1, 1)
y = df["Claims"]
model = LinearRegression()
model.fit(X, y)
print(
"Regression function: y = {:.2f}x + {:.2f}".format(model.coef_[0], model.intercept_)
)
print("R^2 value:", model.score(X, y))
df["Predictions"] = model.predict(X)
plt.plot(df["Period"], df["Claims"], label="Original data")
plt.plot(df["Period"], df["Predictions"], label="Linear Regression")
plt.legend()
plt.xlabel("Time (months)")
plt.ylabel("Claims")
plt.show()
|
# # Large-Scale Analytical Data Processing
# ## Lecture 02: Multidimensional Data and Analytical Queries
# ## Example using Pandas
# ## Exercise Answers
# **Profa. Dra. Cristina Dutra de Aguiar**
# **ICMC/USP**
# This list contains exercises for Lecture 02, titled Multidimensional Data and Analytical Queries. The answer to each exercise is given right after its statement. It is strongly recommended that you work through the exercises before consulting the answers.
# # 1 Loading the Dimension Tables and the Fact Tables
#
import pandas as pd
# creating the DataFrames for the dimension tables
data = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/data.csv"
)
funcionario = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/funcionario.csv"
)
equipe = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/equipe.csv"
)
cargo = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/cargo.csv"
)
cliente = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/cliente.csv"
)
# creating the DataFrames for the fact tables
pagamento = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/pagamento.csv"
)
negociacao = pd.read_csv(
"https://raw.githubusercontent.com/CristinaAguiar/DataMartCelebCo/main/negociacao.csv"
)
# # 2 Exercises
# ## 2.1 Exercise 1
# What is the sum of salaries per date per employee, for the male (sex M) employees of the team whose code equals 1?
# **Hint**: data from the dimension table `funcionario` and from the fact table `pagamento` are needed. The requested comparisons must also be applied, i.e., employee sex = M and team code = 1.
# restricting the employees to male employees
funcionariosMasculinos = funcionario.query('funcSexo == "M"')
# joining the male employees with the fact table pagamento,
# using the column funcPK as the join column
pagamentoFuncionariosMasculinos = pagamento.merge(funcionariosMasculinos, on="funcPK")
# restricting the teams to the team equal to 1
pagamentoFuncionariosMasculinosEquipe = pagamentoFuncionariosMasculinos.query(
    "equipePK == 1"
)
# performing the requested groupings
pagamentoFuncionariosMasculinosEquipe.groupby(["dataPK", "funcPK"])["salario"].sum()
# ## 2.2 Exercise 2
# Show a graphical visualization of the salaries considered in the salary sum of Exercise 1, using the method `pandas.DataFrame.plot.scatter`.
# displaying the answer using the method pandas.DataFrame.plot.scatter
pagamentoFuncionariosMasculinosEquipe.plot(kind="scatter", x="funcPK", y="salario")
# ## 2.3 Exercise 3
# What is the sum of revenues in each of the semesters of the year 2017?
# **Hint**: data from the dimension table `data` and from the fact table `negociacao` are needed. The requested comparison must also be applied, i.e., year = 2017.
# restricting the years to the year 2017
data2017 = data.query("dataAno == 2017")
# joining the year 2017 with the fact table negociacao,
# using the column dataPK as the join column
negociacaoData2017 = negociacao.merge(data2017, on="dataPK")
# joining the negotiations made in 2017 with the dimension table cliente,
# using the column clientePK as the join column
negociacaoData2017cliente = cliente.merge(negociacaoData2017, on="clientePK")
# performing the requested groupings
negociacaoData2017clienteAgrupada = negociacaoData2017cliente.groupby(
    ["dataAno", "dataSemestre"]
)["receita"].sum()
# displaying the answer
negociacaoData2017clienteAgrupada.head(10)
# ## 2.4 Exercise 4
# Show a graphical visualization of the result of Exercise 3, using the methods `pandas.DataFrame.plot.bar`, `pandas.DataFrame.plot.barh`, and `pandas.DataFrame.plot.bar` with stacked bars (`unstack(level = 1)`).
# Compare the different visualizations to check which one is the most meaningful from your point of view.
# displaying the answer using the method pandas.DataFrame.plot.bar
negociacaoData2017clienteAgrupada.plot(kind="bar")
# displaying the answer using the method pandas.DataFrame.plot.barh
negociacaoData2017clienteAgrupada.plot(kind="barh")
# displaying the answer using the method pandas.DataFrame.plot.bar with stacked bars
negociacaoData2017clienteAgrupada.unstack(level=1).plot(kind="bar", stacked=True)
# ## 2.5 Exercise 5
# List, for each year, the sum of salaries for the job title "COZINHEIRO" (cook). Round the salary sum to two decimal places. The columns must be displayed in the order and with the names specified below: "ANO", "TOTAL DE DESPESA". Sort the displayed rows first by total expense in descending order and then by year in descending order.
# **Hint**: data from the dimension tables `data` and `cargo` and from the fact table `pagamento` are needed. The requested comparison must also be applied, i.e., job title = COZINHEIRO.
# restricting the job titles to the title COZINHEIRO
cargoCozinheiro = cargo.query('cargoNome == "COZINHEIRO"')
# joining the COZINHEIRO job titles with the fact table pagamento,
# using the column cargoPK as the join column
pagamentoCargoCozinheiro = pagamento.merge(cargoCozinheiro, on="cargoPK")
# joining the payments made for the COZINHEIRO job title
# with the dimension table data, using the column dataPK as the join column
dataPagamentoCargoCozinheiro = data.merge(pagamentoCargoCozinheiro, on="dataPK")
# performing the requested groupings
dataPagamentoCargoCozinheiroAgrupada = (
    dataPagamentoCargoCozinheiro.groupby(["dataAno"])["salario"].sum().round(2)
)
# displaying the answer without the extra presentation semantics
dataPagamentoCargoCozinheiroAgrupada.head(10)
# converting the pd.Series into a pd.DataFrame
# sorting the answer by year and salary
respostaSemantica = dataPagamentoCargoCozinheiroAgrupada.to_frame().reset_index()
respostaSemantica = respostaSemantica.sort_values(
    by=["dataAno", "salario"], ascending=False
)
# renaming the columns
respostaSemantica = respostaSemantica.rename(
    columns={"dataAno": "ANO", "salario": "TOTAL DE DESPESA"}
)
# displaying the answer with the additional presentation semantics
respostaSemantica.head(10)
# ## 2.6 Exercise 6
# List, for each state name of the branch where the team is located, the sum of revenues per year, considering only quarter 1 and only the customers whose home region is the same region where the branch is located. Round the revenue sum to two decimal places. The columns must be displayed in the order and with the names specified below: "ESTADO", "ANO", "TOTAL DE RECEITA". Sort the displayed rows first by total revenue in descending order, then by state in descending order, then by year in descending order.
# **Hint**: data from the dimension tables `equipe`, `data`, and `cliente` and from the fact table `negociacao` are needed. The requested comparisons must also be applied, i.e., quarter = 1 and customer region = region of the branch where the team is located.
#
# restricting the dates to the desired quarter
dataTri = data.query("dataTrimestre == 1")
# joining the first-quarter dates with the fact table negociacao,
# using the column dataPK as the join column
negDataTri = negociacao.merge(dataTri, on="dataPK")
# joining the negotiations made in the first quarter
# with the dimension table equipe, using the column equipePK as the join column
negDataTriEq = equipe.merge(negDataTri, on="equipePK")
# joining the first-quarter negotiations made by the teams
# with the dimension table cliente, using the column clientePK as the join column
negDataTriEqCli = cliente.merge(negDataTriEq, on="clientePK")
# applying the requested comparison, i.e., customer region = region of the branch
# where the team is located
negCompara = negDataTriEqCli.query("filialRegiaoSigla == clienteRegiaoSigla")
# performing the requested groupings
negComparaAgrupa = (
    negCompara.groupby(["filialEstadoNome", "dataAno"])["receita"].sum().round(2)
)
# displaying the answer without the extra presentation semantics
negComparaAgrupa.head(10)
# converting the pd.Series into a pd.DataFrame
# sorting the answer
negComparaAgrupaDf = negComparaAgrupa.to_frame().reset_index()
negComparaAgrupaDf = negComparaAgrupaDf.sort_values(
    by=["receita", "filialEstadoNome", "dataAno"], ascending=False
)
# renaming the columns
negComparaAgrupaDf = negComparaAgrupaDf.rename(
    columns={
        "filialEstadoNome": "ESTADO",
        "dataAno": "ANO",
        "receita": "TOTAL DE RECEITA",
    }
)
# displaying the answer with the additional presentation semantics
negComparaAgrupaDf.head(10)
# ## 2.7 Exercise 7
# What is the average salary received per job level and per sex in the year 2019?
# restricting the dates to the desired year
data2019 = data.query("dataAno == 2019")
# joining the year 2019 with the fact table pagamento,
# using the column dataPK as the join column
pag2019 = pagamento.merge(data2019, on="dataPK")
# joining the 2019 payments with the dimension table
# funcionario, using the column funcPK as the join column
pag2019Func = pag2019.merge(funcionario, on="funcPK")
# joining the 2019 payments of the employees
# with the dimension table cargo, using the column
# cargoPK as the join column
pag2019FuncCargo = pag2019Func.merge(cargo, on="cargoPK")
# performing the requested groupings
pag2019FuncCargoAgrupado = (
    pag2019FuncCargo.groupby(["cargoNivel", "funcSexo"])["salario"].mean().round(2)
)
# displaying the answer as a two-dimensional table
pag2019FuncCargoAgrupado.head(10)
# ## 2.8 Exercise 8
# Show a graphical visualization of the result of Exercise 7, using the method `unstack(level = 1)` and `pandas.DataFrame.plot.bar` with a regular bar chart. **Important**: Comment on the result shown in the chart.
# displaying the answer using the method pandas.DataFrame.plot.bar
# visualizing the data to better analyze the salary differences
pag2019FuncCargoAgrupado.unstack(level=1).plot(kind="bar")
# The average salaries of female and male employees are balanced only for the JUNIOR level. For the PLENO (mid-level) position, the average salary of the female employees is higher, while for the SENIOR position, the average salary of the male employees is higher. Based on these results, Celeb Co. can be analyzed to see how to balance the average salaries of the PLENO and SENIOR positions.
# ## 2.9 Exercício 9
# Qual o custo/benefício das equipes quando analisado o primeiro semestre do ano de 2020?
# primeira parte da consulta drill-across
# investigando o custo das equipes (soma dos salários)
# restringindo as datas para o semestre e o ano desejado
data12020 = data.query("dataSemestre == 1 and dataAno == 2020")
# realizando a junção do semestre 1 do ano de 2019 com a tabela de fatos
# pagamento considerando como coluna de junção a coluna dataPK
pag12020 = data12020.merge(pagamento, on="dataPK")
# realizando os agrupamentos solicitados
pag12020Eq = pag12020.groupby(["equipePK"])["salario"].sum().to_frame()
# exibindo os dados obtidos para a primeira parte da consulta
pag12020Eq.head(5)
# segunda parte da consulta drill-across
# investigando o benefício das equipes (soma das receitas)
# restringindo as datas para o semestre e o ano desejado
data12020 = data.query("dataSemestre == 1 and dataAno == 2020")
# joining semester 1 of 2020 with the negociacao fact table,
# using the dataPK column as the join key
neg12020 = data12020.merge(negociacao, on="dataPK")
# performing the requested groupings
neg12020Eq = neg12020.groupby(["equipePK"])["receita"].sum().to_frame()
# displaying the data obtained for the second part of the query
neg12020Eq.head(5)
# joining the costs and the benefits
# joining the data obtained for salary and revenue,
# using the equipePK column as the join key
pagNeg = pag12020Eq.merge(neg12020Eq, on="equipePK")
# copying the values into a new variable
# so that the cost/benefit can be computed
pagNegCopia = pagNeg.copy()
pagNegCopia["lucro"] = neg12020Eq["receita"] - pag12020Eq["salario"]
# displaying the answer
pagNegCopia.head(10)
# making the query more semantic
respostaSemantica = equipe.merge(pagNegCopia, on="equipePK")
respostaSemantica[
    ["equipePK", "equipeNome", "filialNome", "salario", "receita", "lucro"]
]
# ## 2.10 Exercise 10
# Compare the number of negotiations carried out by each team in the years 2018 and 2019.
# first part of the drill-across operation
# investigating the number of negotiations of the teams for 2018
# restricting the dates to the desired year
data2018 = data.query("dataAno == 2018")
# joining the year 2018 with the negociacao fact table,
# using the dataPK column as the join key
neg2018 = negociacao.merge(data2018, on="dataPK")
# performing the requested groupings and displaying the result
neg2018Agrupa = neg2018.groupby(["equipePK"])["quantidadeNegociacoes"].sum().to_frame()
neg2018Agrupa.head(5)
# second part of the drill-across operation
# investigating the number of negotiations of the teams for 2019
# restricting the dates to the desired year
data2019 = data.query("dataAno == 2019")
# joining the year 2019 with the negociacao fact table,
# using the dataPK column as the join key
neg2019 = negociacao.merge(data2019, on="dataPK")
# performing the requested groupings and displaying the result
neg2019Agrupa = neg2019.groupby(["equipePK"])["quantidadeNegociacoes"].sum().to_frame()
neg2019Agrupa.head(5)
# performing the drill-across operation
neg20182019 = neg2018Agrupa.merge(
neg2019Agrupa, on="equipePK", suffixes=("_2018", "_2019")
)
neg20182019.head(5)
# making the query more semantic
respostaSemantica = equipe.merge(neg20182019, on="equipePK")
respostaSemantica[
[
"equipeNome",
"filialNome",
"quantidadeNegociacoes_2018",
"quantidadeNegociacoes_2019",
]
]
|
import numpy as np, sys, os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.io import loadmat
import wfdb
import tarfile
# import wget
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import StratifiedKFold
from keras.preprocessing.sequence import pad_sequences
import math
import warnings
import os
def load_challenge_data(filename):
x = loadmat(filename)
data = np.asarray(x["val"], dtype=np.float64)
new_file = filename.replace(".mat", ".hea")
input_header_file = os.path.join(new_file)
with open(input_header_file, "r") as f:
header_data = f.readlines()
return data, header_data
def fxn():
warnings.warn("deprecated", DeprecationWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fxn()
ecg = load_challenge_data(
"../input/georgia-12lead-ecg-challenge-database/WFDB/E00001.mat"
)
# train a generative adversarial network on a one-dimensional function (here a single lead of the ECG signal loaded above)
from numpy import hstack
from numpy import zeros
from numpy import ones
from numpy.random import rand
from numpy.random import randn
from keras.models import Sequential
from keras.layers import Dense
from matplotlib import pyplot
# define the standalone discriminator model
def define_discriminator(n_inputs=2):
model = Sequential()
model.add(
Dense(
25, activation="relu", kernel_initializer="he_uniform", input_dim=n_inputs
)
)
model.add(Dense(1, activation="sigmoid"))
# compile model
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
return model
# define the standalone generator model
def define_generator(latent_dim, n_outputs=2):
model = Sequential()
model.add(
Dense(
15, activation="relu", kernel_initializer="he_uniform", input_dim=latent_dim
)
)
model.add(Dense(n_outputs, activation="linear"))
return model
# define the combined generator and discriminator model, for updating the generator
def define_gan(generator, discriminator):
# make weights in the discriminator not trainable
discriminator.trainable = False
# connect them
model = Sequential()
# add generator
model.add(generator)
# add the discriminator
model.add(discriminator)
# compile model
model.compile(loss="binary_crossentropy", optimizer="adam")
return model
# generate n real samples with class labels
def generate_real_samples(ecgsignal, n):
    # sample n random time indices from the ECG signal and pair them with their amplitudes
x_axis = np.arange(ecgsignal.shape[0])
x_samp = np.random.choice(x_axis, n)
y_samp = ecgsignal[x_samp]
X = hstack((np.expand_dims(x_samp, 1), np.expand_dims(y_samp, 1)))
# generate class labels
y = ones((n, 1))
return X, y
# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n):
# generate points in the latent space
x_input = randn(latent_dim * n)
# reshape into a batch of inputs for the network
x_input = x_input.reshape(n, latent_dim)
return x_input
# use the generator to generate n fake examples, with class labels
def generate_fake_samples(generator, latent_dim, n):
# generate points in latent space
x_input = generate_latent_points(latent_dim, n)
# predict outputs
X = generator.predict(x_input)
# create class labels
y = zeros((n, 1))
return X, y
# evaluate the discriminator and plot real and fake points
def summarize_performance(
epoch, generator, discriminator, latent_dim, ecgsignal, n=100
):
# prepare real samples
x_real, y_real = generate_real_samples(ecgsignal, n)
# evaluate discriminator on real examples
_, acc_real = discriminator.evaluate(x_real, y_real, verbose=0)
# prepare fake examples
x_fake, y_fake = generate_fake_samples(generator, latent_dim, n)
# evaluate discriminator on fake examples
_, acc_fake = discriminator.evaluate(x_fake, y_fake, verbose=0)
# summarize discriminator performance
print(epoch, acc_real, acc_fake)
# scatter plot real and fake data points
pyplot.scatter(x_real[:, 0], x_real[:, 1], color="red")
pyplot.scatter(x_fake[:, 0], x_fake[:, 1], color="blue")
pyplot.show()
# train the generator and discriminator
def train(
g_model,
d_model,
gan_model,
latent_dim,
ecg_sig,
n_epochs=5000,
n_batch=128,
n_eval=100,
):
# determine half the size of one batch, for updating the discriminator
half_batch = int(n_batch / 2)
# manually enumerate epochs
for i in range(n_epochs):
# prepare real samples
x_real, y_real = generate_real_samples(ecg_sig, half_batch)
# prepare fake examples
x_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
# update discriminator
d_model.train_on_batch(x_real, y_real)
d_model.train_on_batch(x_fake, y_fake)
# prepare points in latent space as input for the generator
x_gan = generate_latent_points(latent_dim, n_batch)
# create inverted labels for the fake samples
y_gan = ones((n_batch, 1))
# update the generator via the discriminator's error
gan_model.train_on_batch(x_gan, y_gan)
# evaluate the model every n_eval epochs
if (i + 1) % n_eval == 0:
summarize_performance(i, g_model, d_model, latent_dim, ecg_sig)
# size of the latent space
latent_dim = 100
# create the discriminator
discriminator = define_discriminator()
# create the generator
generator = define_generator(latent_dim)
# create the gan
gan_model = define_gan(generator, discriminator)
# train model
train(
generator,
discriminator,
gan_model,
latent_dim,
ecg[0][1] / 1000,
n_epochs=1000,
n_batch=10,
n_eval=100,
)
|
import numpy as np
np.random.seed(42)
m = 100
X = 2 * np.random.rand(m, 1)
y = 4 + 3 * X + np.random.randn(m, 1)
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 4))
plt.plot(X, y, "b.")
plt.xlabel("$x_1$")
plt.ylabel("$y$", rotation=0)
plt.axis([0, 2, 0, 15])
plt.grid()
plt.show()
X.shape
y.shape
from sklearn.preprocessing import add_dummy_feature
X_b = add_dummy_feature(X)
theta_best = np.linalg.inv(X_b.T @ X_b) @ X_b.T @ y
theta_best
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
lin_reg.intercept_, lin_reg.coef_
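# As a hedged aside (not part of the original notebook): the same least-squares
# solution can be computed with np.linalg.lstsq, which avoids explicitly
# inverting X_b.T @ X_b and is more numerically stable.
theta_lstsq, residuals, rank, singular_values = np.linalg.lstsq(X_b, y, rcond=None)
theta_lstsq  # should agree with theta_best and with (lin_reg.intercept_, lin_reg.coef_)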
|
# # IMAGE PROCESSING FUNDAMENTALS
# # Displaying an Image
import matplotlib.pyplot as plt
import matplotlib.image as img
testImage = img.imread("/kaggle/input/ozalimage/ben.JPG")
# displaying the image
print(testImage.shape)
plt.imshow(testImage)
# # **Let's apply some image processing operations**
from matplotlib import pyplot as plt
import numpy as np
def show_image(image, title="Image", cmap_type="gray"):
image = image[300:1200, 1300:2600, :]
# image=image[:,:,:] -128; #(np.rint(np.dot(-0.5,image))).astype(int)
plt.imshow(image, cmap=cmap_type)
plt.title(title)
plt.axis("off")
plt.show()
print(image.shape)
# print(image[1:5,0:500:,0])
plt.subplot(1, 4, 1)
plt.imshow(image, cmap="gray")
plt.axis("off")
plt.title("Original")
# R level to gray
plt.subplot(1, 4, 2)
plt.imshow(image[:, :, 0], cmap="gray")
plt.axis("off")
plt.title("R to gray")
# G level to gray
plt.subplot(1, 4, 3)
plt.imshow(image[:, :, 1], cmap="gray")
plt.axis("off")
plt.title("G to gray")
# B level to gray
plt.subplot(1, 4, 4)
plt.imshow(image[:, :, 2], cmap="gray")
plt.axis("off")
plt.title("B to gray")
leaf_image = img.imread("/kaggle/input/ozalimage/ben.JPG")
show_image(leaf_image, "Original Image")
# # Grayscale Image
from skimage import color
def show_image(image, title, cmap_type="gray"):
plt.imshow(image, cmap=cmap_type)
plt.title(title)
plt.axis("off")
plt.show()
ozal_image = img.imread("/kaggle/input/ozalimage/ben.JPG")
ozal_gray = color.rgb2gray(ozal_image)
show_image(ozal_image, "orjinal")
show_image(ozal_gray, "Gri Tonlama")
# # Filtering operations on the image with OpenCV
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as img
ozal_image = img.imread("/kaggle/input/ozalimage/ben.JPG")
image = color.rgb2gray(ozal_image)
fig, ax = plt.subplots(1, figsize=(12, 8))
plt.imshow(image, cmap="gray")
kernel = np.ones((3, 3), np.float32) / 16
img = cv2.filter2D(image, -1, kernel)
fig, ax = plt.subplots(1, 2, figsize=(10, 6))
ax[0].imshow(image, cmap="gray")
ax[1].imshow(img, cmap="gray")
import cv2
import numpy as np
import matplotlib.pyplot as plt
image = cv2.imread("/kaggle/input/ozalimage/ben.JPG")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
kernel = np.array([[3, 10, 3], [0, 0, 0], [-3, -10, -3]])
kernel2 = np.array([[2, 0, -2], [1, 0.03, -5], [2, 0, -2]])
img = cv2.filter2D(image, -1, kernel)
img2 = cv2.filter2D(image, -1, kernel2)
fig, ax = plt.subplots(1, 2, figsize=(10, 6))
ax[0].imshow(img)
ax[1].imshow(img2)
# # Image Histogram
red = ozal_image[:, :, 0] # using the red channel of the rocket image.
plt.hist(
red.ravel(), bins=256
) # plot its histogram with 256 bins, the number of possible values of a pixel.
plt.title("Kırmızı Kanal Histogramı")
plt.show
# Piksel değerlerine ait frekanslar 25 ile 250 arasındadır. Genel olarak kırmızı pikseller 100 ile 175 aralığında yoğundur.
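# a quick, hedged numeric check of the reading above (uses the `red` channel
# array from the cell above); the names below are just local helpers
red_min, red_max = red.min(), red.max()
q1, q3 = np.percentile(red, [25, 75])
print(f"red channel range: {red_min}-{red_max}; middle 50% of pixels between {q1:.0f} and {q3:.0f}")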
# # Applying a Thresholding Algorithm
import cv2
ozal_img = cv2.imread("/kaggle/input/ozalimage/ben.JPG")
grayimg = cv2.cvtColor(ozal_img, cv2.COLOR_BGR2GRAY)
# **Grayscale Image**
plt.imshow(
grayimg, cmap="gray"
) # cmap has been used as matplotlib uses some default colormap to plot grayscale images
plt.xticks([]) # To get rid of the x-ticks and y-ticks on the image axis
plt.yticks([])
print("New Image Shape", grayimg.shape)
# Finding optimal threshold
from skimage.filters import threshold_otsu
thresh_val = threshold_otsu(grayimg)
print("The optimal seperation value is", thresh_val)
thresh = 120  # a manually chosen threshold value (the Otsu value thresh_val is used below)
binary_high = grayimg > thresh_val
binary_low = grayimg <= thresh_val
show_image(binary_high, "Thresholded high values")
show_image(binary_low, "Thresholded low values")
# **Trying All Thresholded Values**
from skimage.filters import try_all_threshold
fig, ax = try_all_threshold(grayimg, verbose=False)
# **We see that Isodata, Mean and Otsu give better results than the others, so let us look at Otsu thresholding**
from skimage.filters import threshold_otsu
thresh = threshold_otsu(grayimg)
text_binary_otsu = grayimg > thresh
show_image(text_binary_otsu, "Otsu algorithm")
# # Applying Edge Detection Techniques on The Image
def plot_comparison(original, filtered, title_filtered):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 6), sharex=True, sharey=True)
ax1.imshow(original, cmap=plt.cm.gray)
ax1.set_title("original")
ax1.axis("off")
ax2.imshow(filtered, cmap=plt.cm.gray)
ax2.set_title(title_filtered)
ax2.axis("off")
# **Sobel Operator**
from skimage.filters import sobel
edge_image = sobel(grayimg) # apply the filter
plot_comparison(grayimg, edge_image, "Edge image")
# **We can now safely say that the Sobel operator alone doesn't work well on this image**
# # Let Us Look at the Smoothing Filters
from skimage.filters import gaussian
smooth_ozal_image = gaussian(
ozal_img, multichannel=True
) # you have to specify the multichannel
plot_comparison(ozal_img, smooth_ozal_image, "Smooth leaf")
# # Contrast Enhancement Techniques
from skimage import exposure
equalized_ozal_image = exposure.equalize_hist(ozal_img)
plot_comparison(ozal_img, equalized_ozal_image, "Histogram equalization")
# **Note the leaves become more prominent by Histogram Equalization Techniques**
# **ADAPTIVE HISTOGRAM EQUALISATION**
from skimage import exposure
adapthist_leaf_image = exposure.equalize_adapthist(leaf_image)
plot_comparison(leaf_image, adapthist_leaf_image, "Adaptive Histogram equalization")
# It seems to have a better representation of the image
# # Now Let Us Finally Look at the Contour Methods
def show_image_contour(image, contours):
plt.figure()
for n, contour in enumerate(contours):
plt.plot(contour[:, 1], contour[:, 0], linewidth=3)
plt.imshow(image, interpolation="nearest", cmap="gray_r")
plt.title("Contours")
plt.axis("off")
plt.show()
from skimage import measure
contours_gray_image = measure.find_contours(grayimg, 0.8)
show_image_contour(grayimg, contours_gray_image)
# # Edge Detection
from skimage.feature import canny
canny_leaf_image = canny(grayimg)
show_image(canny_leaf_image, "Canny edges")
# **Interestingly it shows The Focused Leaf Properly**
# # **Specific Objectives**
# The main objective of the competition is to develop machine learning-based models to accurately classify a given leaf image from the test dataset to a particular disease category, and to identify an individual disease from multiple disease symptoms on a single leaf image.
# # **Resources**
# I thank Kaggle for providing the dataset and [Data](https://bsapubs.onlinelibrary.wiley.com/doi/10.1002/aps3.11390),
# without whom this wouldn't have been possible.
# Also I would like to thank [Ankur Singh](https://www.kaggle.com/ankursingh12/resized-plant2021) for this amazing dataset, as without it, it would have taken hours and hours to train the below-mentioned model.
# # Import the necessary libraries
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import PIL
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
import os
import random
from tqdm import tqdm
import tensorflow_addons as tfa
import random
from sklearn.preprocessing import MultiLabelBinarizer
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
import keras
from keras.preprocessing import image
from keras.models import Sequential
from tensorflow.keras.preprocessing.image import (
ImageDataGenerator,
load_img,
img_to_array,
smart_resize,
)
from keras.layers import Dense, Dropout, Flatten, BatchNormalization, Activation
from keras.constraints import maxnorm
from keras.layers.convolutional import Conv2D, MaxPooling2D
from tensorflow.keras.optimizers import Adam
import cv2
from PIL import Image
from keras.preprocessing.image import load_img, img_to_array
from keras.models import load_model
from keras.metrics import AUC
pd.set_option("display.max_columns", None)
# # **Let's Now Have a look at the Dataset and Study it better**
# I would like to thank [Praveen](https://www.kaggle.com/praveengovi/plant-pathology-detail-eda-pytorch) for this amazing EDA and analysis and also [Arnab](https://www.kaggle.com/arnabs007/apple-leaf-diseases-with-inceptionresnetv2-keras), from whom I have taken reference. I have implemented various EDA steps, studied their models, and approached the problem with a transfer-learning model on a ResNet50V2 base.
#
train_dir = "../input/plant-pathology-2021-fgvc8/train_images"
test_dir = "../input/plant-pathology-2021-fgvc8/test_images"
train = pd.read_csv("../input/plant-pathology-2021-fgvc8/train.csv")
# print(len(train))
# print(train.columns)
# print(train['labels'].value_counts())
# print(train['labels'].value_counts().plot.bar())
# # Let's Study the dataset in a better way and try to find some interesting stuff!!!
train.head()
# **We get to know that we have "many" images with mostly 12 types of labels (but there is a twist), which we will come back to later.**
# Let's look at the number of images for various of 12 categories present
train["labels"].value_counts()
plt.figure(figsize=(20, 12))
labels = sns.barplot(train.labels.value_counts().index, train.labels.value_counts())
for item in labels.get_xticklabels():
item.set_rotation(45)
# **Note**
# Notice that there is a huge imbalance in the dataset, with "scab" having the highest frequency and "powdery_mildew complex" the lowest
# # Important Observation
# **Look at the labels, doesn't it strike you ??**
# **Some of the labels are mixture of one or more types !!! And thus the problem becomes Multilabel Problem**
# So there are not 12 labels, it's actually just 6 labels.
# 5 diseases:
# 1. rust
# 2. scab
# 3. complex
# 4. frog eye leaf spot
# 5. powdery mildew
# and another label is
# 6. healthy (healthy leaves)
# Now the most important thing is, as one image can have multiple diseases, this is a **multi-label classification** problem. Many get confused between multilabel and multiclass classification. If you are new to multilabel classification, I would suggest going over [An introduction to MultiLabel classification](https://www.geeksforgeeks.org/an-introduction-to-multilabel-classification/).
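# A tiny illustrative sketch (not from the original notebook): this is what
# MultiLabelBinarizer does with multi-label rows, shown here on made-up labels.
toy_labels = [["scab"], ["rust", "complex"], ["healthy"]]
toy_mlb = MultiLabelBinarizer()
print(toy_mlb.fit_transform(toy_labels))  # one row per image, one column per label
print(toy_mlb.classes_)  # the column order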
# So now we have to process the labels, and then let's find out the actual frequencies of each label.
# We divide it based on " " or space character , in order to get the labels for each of the image
train["labels"] = train["labels"].apply(lambda string: string.split(" "))
train
# Converting the label representation into **one-hot encoded format** using MultiLabelBinarizer from scikit-learn. Now we can see and plot the frequencies of each label.
s = list(train["labels"])
mlb = MultiLabelBinarizer()
trainx = pd.DataFrame(mlb.fit_transform(s), columns=mlb.classes_, index=train.index)
print(trainx.columns)
# These are the 6 different labels
print(trainx.sum())
labels = list(trainx.sum().keys())
# print(labels)
label_counts = trainx.sum().values.tolist()
fig, ax = plt.subplots(1, 1, figsize=(20, 6))
sns.barplot(x=labels, y=label_counts, ax=ax)
# **NOW WE CAN SEE THE DATASET BECOMES MORE OR LESS BALANCED , AT LEAST BETTER THAN WHAT IT WAS PREVIOUSLY!**
# # Let's See the Plant Pathology Images
labels = pd.concat([train["image"], trainx], axis=1)
labels.head()
fig1 = plt.figure(figsize=(20, 10))
for i in range(1, 10):
rand = random.randrange(1, 18000)
sample = os.path.join(
"../input/plant-pathology-2021-fgvc8/train_images/", train["image"][rand]
)
img = PIL.Image.open(sample)
ax = fig1.add_subplot(4, 3, i)
ax.imshow(img)
title = f"{train['labels'][rand]}{img.size}"
plt.title(title)
fig1.tight_layout()
# # Image Size & Processing
# From the titles we can see some image sizes such as (4000, 2672). Larger images are harder to process and hence take much longer to train the CNN. Downsampling all these 18632 images is also a time-consuming task. This is why I am going to use the resized images from the dataset [resized-plant2021](https://www.kaggle.com/ankursingh12/resized-plant2021) by Ankur Singh. He has already downsampled the images into sizes of 256, 384, 512 & 640 px.
# For preprocessing I take the help of the [Keras ImageDataGenerator](https://keras.io/api/preprocessing/image/). We transform the images to a size of (256, 256, 3).
datagen = keras.preprocessing.image.ImageDataGenerator(
rescale=1 / 255.0,
preprocessing_function=None,
data_format=None,
)
train_data = datagen.flow_from_dataframe(
train,
directory="../input/resized-plant2021/img_sz_512",
x_col="image",
y_col="labels",
color_mode="rgb",
target_size=(256, 256),
class_mode="categorical",
batch_size=32,
shuffle=False,
seed=40,
)
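# A hedged sketch (not the author's exact model) of the ResNet50V2-based
# transfer-learning classifier mentioned above, assuming 6 sigmoid outputs for
# the multi-label targets and the (256, 256, 3) inputs produced by train_data.
base_model = tf.keras.applications.ResNet50V2(
    include_top=False, weights="imagenet", input_shape=(256, 256, 3)
)
base_model.trainable = False  # freeze the pretrained backbone for a first training phase
transfer_model = tf.keras.Sequential(
    [
        base_model,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Dense(6, activation="sigmoid"),  # one independent output per label
    ]
)
transfer_model.compile(
    optimizer=tf.keras.optimizers.Adam(1e-3),
    loss="binary_crossentropy",  # multi-label: independent binary losses per output
    metrics=[tf.keras.metrics.AUC(multi_label=True)],
)
# transfer_model.fit(train_data, epochs=5)  # uncomment to train on the generator above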
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
import seaborn as sns
import json
import pathlib
from PIL import Image
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import (
ImageDataGenerator,
array_to_img,
img_to_array,
load_img,
)
input_dir = "/kaggle/input/cassava-leaf-disease-classification"
os.listdir(input_dir)
# classification labels
file_path = input_dir + "/label_num_to_disease_map.json"
with open(file_path, "r") as fp:
json_data = json.load(fp)
class_name = list(json_data.values())
print(json_data) # list
print(class_name) # list
# train_data labels
train = pd.read_csv(input_dir + "/train.csv")
print(train.head(5))
#### test sample
train = train.head(1000)
# sample image and label
sample_image = train.sample(1)
ind = int(sample_image.index.values)
sample_id = sample_image.loc[ind, "image_id"]
sample_label = sample_image.loc[ind, "label"]
image = plt.imread(input_dir + "/train_images/" + sample_id)
plt.imshow(image)
plt.xlabel(class_name[sample_label])
plt.show()
# show some sample images and labels
plt.figure(figsize=(9, 9))
for i in range(9):
plt.subplot(3, 3, i + 1)
image = plt.imread(input_dir + "/train_images/" + train.loc[i, "image_id"])
plt.imshow(image)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.xlabel(class_name[train.loc[i, "label"]])
plt.show()
# simple eda show statistics characters
sns.countplot(x="label", data=train)
plt.show()
# # **data processing**
# method 01: numpy array -> dataframe FAIL
# convert the images back into np.array form
# the array of one sample
image = tf.keras.preprocessing.image.load_img(input_dir + "/train_images/" + sample_id)
input_arr = tf.keras.preprocessing.image.img_to_array(image)
print(input_arr.shape)
###### the data size is too large, thus oom, sample!!!
train = train.head(1000) # later train=train.sample(2000)
img_list = []
label_list = []
for m, n in zip(train.image_id, train.label):
# id = m, label = n
image = tf.keras.preprocessing.image.load_img(input_dir + "/train_images/" + m)
input_arr = tf.keras.preprocessing.image.img_to_array(image)
# img_list.append(np.array([input_arr]))
img_list.append(input_arr)
label_list.append(n)
# process data into df
df = pd.concat([pd.Series(img_list), pd.Series(label_list)], axis=1)
df.columns = ["img_data", "label"]
df.head(1)
X = df["img_data"]
y = df["label"].astype("str")
## preprocessing resize&rescale
X = np.stack(X / 255.0) # normalize
X = X.reshape(-1, 600, 800, 3)  # the images are 600x800 RGB, so 3 channels
### validation split
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1, random_state=42)
###### ImageDataGenerator:Generate batches of tensor image data with real-time data augmentation.
from keras.preprocessing.image import (
ImageDataGenerator,
array_to_img,
img_to_array,
load_img,
)
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # Set each sample mean to 0
featurewise_std_normalization=False, # Divide inputs by std of the dataset, feature-wise
samplewise_std_normalization=False, # Divide each input by its std
zca_whitening=False, # Apply ZCA whitening
rotation_range=0, # Degree range for random rotations
zoom_range=0, # Range for random zoom
width_shift_range=0.05, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.05, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # Randomly flip inputs horizontally
vertical_flip=False, # Randomly flip inputs vertically
)
datagen.fit(X_train)
#
# # **##############################################**
# **method 02: tensorflow frame success**
BATCH_SIZE = 16
STEPS_PER_EPOCH = len(train) * 0.8 / BATCH_SIZE
VALIDATION_STEPS = len(train) * 0.2 / BATCH_SIZE
EPOCHS = 10 # 10
TARGET_SIZE = 224
train.dtypes
train.label = train.label.astype("str")
train_generator = ImageDataGenerator(
validation_split=0.2,
preprocessing_function=None,
zoom_range=0.2,
cval=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode="nearest",
shear_range=0.2,
height_shift_range=0.2,
width_shift_range=0.2,
).flow_from_dataframe(
train,
directory=os.path.join(input_dir, "train_images"),
subset="training",
x_col="image_id",
y_col="label",
target_size=(TARGET_SIZE, TARGET_SIZE),
batch_size=BATCH_SIZE,
class_mode="sparse",
)
validation_generator = ImageDataGenerator(validation_split=0.2).flow_from_dataframe(
train,
directory=os.path.join(input_dir, "train_images"),
subset="validation",
x_col="image_id",
y_col="label",
target_size=(TARGET_SIZE, TARGET_SIZE),
batch_size=BATCH_SIZE,
class_mode="sparse",
)
# # model
# model.fit(datagen.flow(x_train,y_train,batch_size=32),steps_per_epoch=len(x_train)/32, epochs=epochs)
##### efficientnetb0
efn = tf.keras.applications.EfficientNetB0(
input_shape=(TARGET_SIZE, TARGET_SIZE, 3), include_top=False, weights="imagenet"
)
efn_model = tf.keras.Sequential(
[
efn,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(len(class_name), activation="softmax"),
]
)
efn_model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
early_stop = tf.keras.callbacks.EarlyStopping(
monitor="val_loss",
min_delta=0.001,
patience=5,
mode="min",
verbose=1,
restore_best_weights=True,
)
# efn_history = efn_model.fit(datagen.flow(X_train,y_train,batch_size=32),steps_per_epoch=len(X_train)/32, epochs=epochs, validation_data = (X_val,y_val))
efn_history = efn_model.fit(
train_generator,
batch_size=32,
epochs=EPOCHS,
validation_data=validation_generator,
callbacks=[early_stop],
)
train_acc = efn_history.history["accuracy"]
val_acc = efn_history.history["val_accuracy"]
print(train_acc)
print(val_acc)
##### mobile_net
mobile_net = tf.keras.applications.MobileNetV2(
input_shape=(TARGET_SIZE, TARGET_SIZE, 3), include_top=False, weights="imagenet"
)
# mobile_net.trainable = False
mon_model = tf.keras.Sequential(
[
mobile_net,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(len(class_name), activation="softmax"),
]
)
mon_model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
early_stop = tf.keras.callbacks.EarlyStopping(
monitor="val_loss",
min_delta=0.001,
patience=5,
mode="min",
verbose=1,
restore_best_weights=True,
)
# mon_history = mon_model.fit(datagen.flow(X_train,y_train,batch_size=32),steps_per_epoch=len(X_train)/32, epochs=epochs, validation_data = (X_val,y_val))
mon_history = mon_model.fit(
train_generator,
batch_size=32,
epochs=EPOCHS,
validation_data=validation_generator,
callbacks=[early_stop],
)
train_acc = mon_history.history["accuracy"]
val_acc = mon_history.history["val_accuracy"]
print(train_acc)
print(val_acc)
### cnn
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation="relu", input_shape=(600, 800, 3)))
## model.add(layers.Conv2D(filters = 32, kernel_size = (5,5), padding = 'same',activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
## model.add(Dropout(0.1))
model.add(layers.Conv2D(64, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(len(class_name)))  # one logit per class
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_val, y_val))
base_learning_rate = 0.0001
model.compile(
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate / 10),
metrics=["accuracy"],
)  # changed the optimizer
### early stop
early_stop = tf.keras.callbacks.EarlyStopping(
monitor="val_loss",
min_delta=0.001,
patience=5,
mode="min",
verbose=1,
restore_best_weights=True,
)
# Set a learning rate annealer
learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau(
monitor="val_acc", patience=1, verbose=1, factor=0.5, min_lr=0.0001
)
# Fit the model
history = model.fit_generator(
    datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
    epochs=EPOCHS,
    validation_data=(X_val, y_val),
    verbose=2,
    steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
    callbacks=[learning_rate_reduction],
)
##### visualize result
model.fit(
    datagen.flow(X_train, y_train, batch_size=32),
    steps_per_epoch=len(X_train) / 32,
    epochs=EPOCHS,
)
# convert the image into np.array form, method 01
sample_image = train.sample(1)
ind = int(sample_image.index.values)
sample_id = sample_image.loc[ind, "image_id"]
sample_label = sample_image.loc[ind, "label"]
image = Image.open(input_dir + "/train_images/" + sample_id)
print((np.array(image)).shape)
# normalize (convert the PIL image to a NumPy array first)
x = np.array(image) / 255.0
# resize to a fixed shape (a plain reshape cannot change the number of pixels)
x = tf.image.resize(x, (64, 64)).numpy()
# convert the image into np.array form, method 02
image = tf.keras.preprocessing.image.load_img(input_dir + "/train_images/" + sample_id)
input_arr = tf.keras.preprocessing.image.img_to_array(image)
print(input_arr.shape)
# validation split (generic reference snippet: data_path must point to a directory with one sub-folder per class)
batch_size = 32
img_height = 180
img_width = 180
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_path,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_path,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
### alternatively, convert all the images to arrays, build a DataFrame, and then use train_test_split
### or use ImageDataGenerator(rescale, validation_split).flow_from_directory(source) and then zip x and y together
import pathlib
data_dir = pathlib.Path(input_dir + "/train_images")  # this is simply the path
# iterate over the paths of all files
# for i in data_dir.iterdir():
# print(i)
# train_ds = tf.keras.preprocessing.image_dataset_from_directory()
from keras.preprocessing.image import (
ImageDataGenerator,
array_to_img,
img_to_array,
load_img,
)
###### ImageDataGenerator:Generate batches of tensor image data with real-time data augmentation.
# datagen = ImageDataGenerator(rotation_range=44,rescale=1./255,wid_shift_range=0.4,height_shift_range=0.8,
# shear_range=0.7,zoom_range=0.3,horizontal_flip=True,vertical_flip=True,
# fill_mode='nearest')
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
    samplewise_center=False,  # Set each sample mean to 0
featurewise_std_normalization=False, # Divide inputs by std of the dataset, feature-wise
samplewise_std_normalization=False, # Divide each input by its std
zca_whitening=False, # Apply ZCA whitening
rotation_range=0, # Degree range for random rotations
zoom_range=0, # Range for random zoom
width_shift_range=0.05, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.05, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # Randomly flip inputs horizontally
vertical_flip=False, # Randomly flip inputs vertically
)
# compute quantities required for featurewise normalization
datagen.fit(X_train)
model.fit(
    datagen.flow(X_train, y_train, batch_size=32),
    steps_per_epoch=len(X_train) / 32,
    epochs=EPOCHS,
)
## ## preprocessing reshape,rescale
# model = models.Sequential()
# model.add(layers.Conv2D(32,(3,3),activation='relu',input_shape(32,32,3)))
img_size = 100
resize_and_rescale = tf.keras.Sequential(
[
layers.experimental.preprocessing.Resizing(img_size, img_size), # reshape
layers.experimental.preprocessing.Rescaling(1.0 / 255), # rescale pixel values
]
)
sample = resize_and_rescale(input_arr)  # apply to the image array loaded above
image = plt.imread(input_dir + "/train_images/" + sample_id)
plt.imshow(image)
plt.xlabel(class_name[sample_label])
plt.show()
### data augmentation
data_augmentation = tf.keras.Sequential(
    [
        layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
        layers.experimental.preprocessing.RandomRotation(0.2),
    ]
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv("/kaggle/input/eeg-data-2/Scalogram_Data (5).csv")
df
df = df.iloc[:, 1:]
df
df = df.sample(frac=1).reset_index(drop=True)
df
X_train = df.iloc[:10000, :-5]
X_test = df.iloc[10000:, :-5]
Y_train = df.iloc[:10000, -5:]
Y_test = df.iloc[10000:, -5:]
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
np.random.seed(2)
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
sns.set(style="white", context="notebook", palette="deep")
g = sns.countplot(Y_train)
# Normalize the data
X_train = X_train / 255.0
X_test = X_test / 255.0
X_train
import tensorflow as tf
# Reshape the images into 3 dimensions (height = 32px, width = 32px, channels = 1)
X_train_values = tf.reshape(X_train.values, [-1, 32, 32, 1])
X_test_values = tf.reshape(X_test.values, [-1, 32, 32, 1])
Y_train = np.array(Y_train)
Y_test = np.array(Y_test)
X_train = np.array(X_train_values)
X_test = np.array(X_test_values)
# Set the random seed
random_seed = 2
# Split the train and the validation set for the fitting
X_train1, X_val, Y_train1, Y_val = train_test_split(
X_train, Y_train, test_size=0.2, random_state=random_seed
)
from keras.layers import LeakyReLU
# Set the CNN model
# my CNN architecture is: In -> [Conv2D->relu -> BatchNorm -> MaxPool2D]*2 -> Flatten -> Dense -> Out
model = Sequential()
model.add(
Conv2D(
filters=16,
kernel_size=(5, 5),
padding="Same",
activation="relu",
input_shape=(32, 32, 1),
)
)
model.add(BatchNormalization())
# model.add(Conv2D(filters = 16, kernel_size = (7,7),padding = 'Same', activation ='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
# model.add(Dropout(0.25))
# model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same', activation ='relu'))
# model.add(BatchNormalization())
# model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same', activation ='relu'))
# model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
# model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="Same", activation="relu"))
model.add(BatchNormalization())
# model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same', activation ='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
# model.add(Dropout(0.25))
model.add(Flatten())
# model.add(Dense(1024))
# model.add(LeakyReLU(alpha=0.01))
model.add(Dense(256, activation="relu"))
# model.add(LeakyReLU(alpha=0.01))
model.add(Dense(5, activation="softmax"))
from keras.optimizers import Adadelta
# Define the optimizer
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# Set a learning rate annealer
learning_rate_reduction = ReduceLROnPlateau(
monitor="val_acc", patience=3, verbose=1, factor=0.5, min_lr=0.00001
)
# Set epochs and batch_size
epochs = 100
batch_size = 256
# Compile the model
model.compile(
optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]
)
# Fit the model
history = model.fit(
X_train1,
Y_train1,
batch_size=batch_size,
epochs=epochs,
validation_data=(X_val, Y_val),
verbose=2,
callbacks=[learning_rate_reduction],
)
# ### Function to plot Loss and Accuracy Vs Epochs
import matplotlib.pyplot as plt
def plot_history(history):
loss_list = [s for s in history.history.keys() if "loss" in s and "val" not in s]
val_loss_list = [s for s in history.history.keys() if "loss" in s and "val" in s]
acc_list = [s for s in history.history.keys() if "acc" in s and "val" not in s]
val_acc_list = [s for s in history.history.keys() if "acc" in s and "val" in s]
if len(loss_list) == 0:
print("Loss is missing in history")
return
## As loss always exists
epochs = range(1, len(history.history[loss_list[0]]) + 1)
## Loss
plt.figure(1)
for l in loss_list:
plt.plot(
epochs,
history.history[l],
"b",
label="Training loss ("
+ str(str(format(history.history[l][-1], ".5f")) + ")"),
)
for l in val_loss_list:
plt.plot(
epochs,
history.history[l],
"g",
label="Validation loss ("
+ str(str(format(history.history[l][-1], ".5f")) + ")"),
)
plt.title("Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
## Accuracy
plt.figure(2)
for l in acc_list:
plt.plot(
epochs,
history.history[l],
"b",
label="Training accuracy ("
+ str(format(history.history[l][-1], ".5f"))
+ ")",
)
for l in val_acc_list:
plt.plot(
epochs,
history.history[l],
"g",
label="Validation accuracy ("
+ str(format(history.history[l][-1], ".5f"))
+ ")",
)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
plot_history(history)
from sklearn import metrics
actual = np.argmax(Y_test, axis=1)
predicted = np.argmax(model.predict(X_test), axis=1)
confusion_matrix = metrics.confusion_matrix(actual, predicted)
cm_display = metrics.ConfusionMatrixDisplay(
confusion_matrix=confusion_matrix, display_labels=["A", "B", "C", "D", "E"]
)
cm_display.plot()
plt.show()
from sklearn.metrics import accuracy_score, recall_score, f1_score
print("Accuracy = ", accuracy_score(actual, predicted))
print("Recall = ", recall_score(actual, predicted, average=None))
print("F1_Score = ", f1_score(actual, predicted, average=None))
|
# # Import Dependencies
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.figure_factory as ff
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import chi2
import missingno as msno
from sklearn.preprocessing import normalize
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import f_classif
from scipy.stats import kendalltau
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
# # Load Data
train_path = "/kaggle/input/nebulanet/train.csv"
test_path = "/kaggle/input/nebulanet/test.csv"
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
# # Helper Functions
def radius_based_feature(x):
if x <= 0.015:
return "Pearl Dwarfs"
elif x <= 0.132:
return "Umber Dwarfs"
elif x <= 0.73:
return "Crimson Dwarfs"
elif x <= 10.2:
return "Aurelian Mainstays"
elif x <= 98.0:
return "Celestial Sovereigns"
else:
return "Cosmic Behemoths"
def ohe(df):
categorical_cols = ["Star color", "Spectral Class"]
processed_df = pd.get_dummies(df, columns=categorical_cols)
return processed_df
# ## EDA
# ### What features are good in star checking?
# - Temp in K
# - Mass relative to Sun
# - Radius relative to Sun
# - Luminosity
# (the star's intrinsic brightness; luminosity is closely related to a star's size and temperature; see the small sketch after this list)
# - Spectral Type
# (Different types of stars have different spectral features, which can be used to classify them. The spectral types are typically designated by letters, such as O, B, A, F, G, K, and M.)
# - Age
# (luminosity and temperature. Young stars are typically more luminous and hotter than older stars.)
# - Metallicity
# (This can be determined by analyzing its spectrum. Stars with higher metallicity are typically younger and more massive than those with lower metallicity.)
# - Absolute magnitude(Mv)
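# A small hedged sketch (not from the original notebook) of how luminosity,
# radius and temperature are tied together by the Stefan-Boltzmann law
# L = 4*pi*R^2*sigma*T^4, using approximate solar constants.
L_SUN = 3.828e26  # W, approximate solar luminosity
R_SUN = 6.957e8  # m, approximate solar radius
SIGMA = 5.670e-8  # W m^-2 K^-4, Stefan-Boltzmann constant
def effective_temperature(lum_solar, radius_solar):
    """Effective temperature in K from luminosity and radius given in solar units."""
    return (lum_solar * L_SUN / (4 * np.pi * SIGMA * (radius_solar * R_SUN) ** 2)) ** 0.25
# sanity check: the Sun itself (L = 1, R = 1) should come out near ~5800 K
print(effective_temperature(1.0, 1.0))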
train_df.head()
train_df["Temperature (K)"].describe()
grouped = train_df.groupby(["Radius(R/Ro)", "Star type"]).size().unstack()
fig, ax = plt.subplots(figsize=(50, 22))
grouped.plot(kind="bar", stacked=True, ax=ax)
ax.set_xlabel("Radius(R/Ro)")
ax.set_ylabel("Count")
ax.set_title("Star Type Across Radius(R/Ro)")
# train_df['Probable_Label'] = train_df['Radius(R/Ro)'].apply(lambda x: radius_based_feature(x))
# test_df['Probable_Label'] = test_df['Radius(R/Ro)'].apply(lambda x: radius_based_feature(x))
df_categorical = train_df.select_dtypes("object")
for col in df_categorical.columns:
le = LabelEncoder()
df_categorical[col] = le.fit_transform(df_categorical[col])
x = df_categorical.drop(columns=["Star type"], axis=1)
y = df_categorical["Star type"]
chi_scores = chi2(x, y)
chi_scores
chi_values = pd.Series(chi_scores[0], index=x.columns)
chi_values.sort_values(ascending=False, inplace=True)
chi_values.plot.bar()
plt.xlabel("Features")
plt.ylabel("Chi Score")
print(chi_values)
print("Chi Score is directly proportional to Feature Importance")
p_vals = pd.Series(chi_scores[1], index=x.columns)
p_vals.sort_values(ascending=False, inplace=True)
p_vals.plot.bar()
plt.xlabel("Features")
plt.ylabel("p-value")
print(p_vals)
print(
    "\n\nA lower p-value for the chi-square test indicates a stronger, more statistically significant association with the target"
)
df_anova = train_df[
[
"Temperature (K)",
"Luminosity(L/Lo)",
"Radius(R/Ro)",
"Absolute magnitude(Mv)",
"Star type",
]
]
continous_feature_importance = {"feature": [], "f_stat": [], "p_val": []}
for i in df_anova.columns:
if i != "Star type":
f_s, p_v = f_classif(
df_anova[[i]].values.reshape(-1, 1),
df_anova[["Star type"]].values.reshape(-1, 1).ravel(),
)
continous_feature_importance["feature"].append(i)
continous_feature_importance["f_stat"].append(float(f_s))
continous_feature_importance["p_val"].append(float(p_v))
cont_features = pd.DataFrame.from_dict(continous_feature_importance)
cont_features
ax = px.bar(cont_features, x="feature", y="f_stat")
ax.show()
ax = px.bar(cont_features, x="feature", y="p_val")
ax.show()
L = train_df["Luminosity(L/Lo)"][0]
R = train_df["Radius(R/Ro)"][0]
sigma = 5.67 * (10**-8)
M = np.sqrt(L / (4 * np.pi * sigma * R**2))
M
X = train_df.drop("Star type", axis=1)
X = ohe(X)
y = train_df["Star type"]
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.2)
X_train
X_train.columns
d_train = lgb.Dataset(X_train, label=y_train)
params = {}
params["learning_rate"] = 0.03
params["boosting_type"] = "gbdt" # GradientBoostingDecisionTree
params["objective"] = "multiclass" # Multi-class target feature
params["metric"] = "multi_logloss" # metric for multi-class
params["max_depth"] = 10
params["num_class"] = 6  # number of distinct classes in the target
# params["class_weight "]=scale_pos_weight
clf = lgb.train(params, d_train, 100)  # train the model for 100 boosting rounds
# # Test Accuracy
y_pred = clf.predict(X_test)
y_pred = [np.argmax(line) for line in y_pred]
print(classification_report(y_test, y_pred))
# # Train Accuracy
y_pred_train = clf.predict(X_train)
y_pred_train = [np.argmax(line) for line in y_pred_train]
print(classification_report(y_train, y_pred_train))
def create_model(d_train, epochs=100):
params = {}
params["learning_rate"] = 0.03
params["boosting_type"] = "gbdt" # GradientBoostingDecisionTree
params["objective"] = "multiclass" # Multi-class target feature
params["metric"] = "multi_logloss" # metric for multi-class
params["max_depth"] = 10
    params["num_class"] = 6  # number of distinct classes in the target
    params["class_weight"] = scale_pos_weight
    clf = lgb.train(params, d_train, epochs)  # train for the given number of boosting rounds
return clf
X["Star color_Yellowish"] = 0
d_train_final = lgb.Dataset(X, label=y)
scale_pos_weight = len(y) / (6 * np.bincount(y))
final_model = create_model(d_train_final, epochs=100)
test_df = ohe(test_df)
test_df
considered_vars_test = [
"Temperature (K)",
"Luminosity(L/Lo)",
"Radius(R/Ro)",
"Absolute magnitude(Mv)",
"Star color_Blue",
"Star color_Blue ",
"Star color_Blue White",
"Star color_Blue white",
"Star color_Blue white ",
"Star color_Blue-White",
"Star color_Blue-white",
"Star color_Orange",
"Star color_Orange-Red",
"Star color_Pale yellow orange",
"Star color_Red",
"Star color_White",
"Star color_White-Yellow",
"Star color_Whitish",
"Star color_Yellowish White",
"Star color_white",
"Star color_yellow-white",
"Star color_yellowish",
"Spectral Class_A",
"Spectral Class_B",
"Spectral Class_F",
"Spectral Class_G",
"Spectral Class_K",
"Spectral Class_M",
"Spectral Class_O",
]
testcols = test_df.columns
for i in considered_vars_test:
if i not in testcols:
test_df[i] = 0
test_df
print(test_df.shape)
print(X.shape)
y_pred = final_model.predict(test_df)
y_pred = [np.argmax(line) for line in y_pred]
# print("-----------ON TEST--------- ")
# print(classification_report(y_test, y_pred))
# print("-----------ON Train--------- ")
# y_pred_train=final_model.predict(X_train)
# y_pred_train = [np.argmax(line) for line in y_pred_train]
# print(classification_report(y_train, y_pred_train))
y_pred
predictions = list(le.inverse_transform(y_pred))
sub = pd.read_csv("/kaggle/input/nebulanet/samplesolutions.csv")
sub["Star type"] = predictions
sub.head()
sub.to_csv("tapu_sena_submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
train
len(train.loc[train["target"] == 1]) / len(train)
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import nltk
nltk.download("omw-1.4")
stopwords = nltk.corpus.stopwords.words("english") + ["http", "https"]
wordnet_lemmatizer = WordNetLemmatizer()
def preprocessing(document):
# get rid of @, maybe cut words < 3 characters
res = word_tokenize(document.lower())
res = [word for word in res if (word not in stopwords and word.isalnum())]
res = [("$NUMBER" if word.isnumeric() else word) for word in res]
res = [wordnet_lemmatizer.lemmatize(word) for word in res]
# res = [word for word in res if len(word) > 3]
return res
corpus = train["text"].map(preprocessing).values.tolist()
import gensim.downloader
glove_vectors = gensim.downloader.load("glove-twitter-100")
glove_vectors
count = 0
for document in corpus:
for word in document:
if word == "numeric":
count += 1
print(count)
glove_vectors["numeric"]
from sklearn.feature_extraction.text import TfidfVectorizer
tf_corpus = list(map(lambda elem: " ".join(elem), corpus))
tfidf = TfidfVectorizer(use_idf=True)
tfidf.fit_transform(tf_corpus)
idf = dict(zip(tfidf.get_feature_names_out(), tfidf.idf_))
import numpy as np
def w2v(document):
res = [
word
for word in document
if (word == "$NUMBER" or (word in glove_vectors and word in idf))
]
res_vectors = [
(
glove_vectors["numeric"] * idf["number"]
if word == "$NUMBER"
else glove_vectors[word] * idf[word]
)
for word in res
]
sum_res = sum([idf["number"] if word == "$NUMBER" else idf[word] for word in res])
if sum_res == 0:
return np.zeros(100)
return np.array(sum(res_vectors) / sum_res)
# corpus
features = list(map(w2v, corpus))
feature_targets = np.array(train["target"])
x_train = []
y_train = []
for i, elem in enumerate(features):
if np.isnan(elem).any():
continue
x_train.append(elem)
y_train.append(feature_targets[i])
x_train = np.array(x_train)
y_train = np.array(y_train)
print(x_train.shape)
from sklearn.neural_network import MLPClassifier
clf = MLPClassifier(
hidden_layer_sizes=(100, 100),
random_state=1,
max_iter=300,
verbose=True,
learning_rate_init=0.0001,
early_stopping=True,
n_iter_no_change=20,
).fit(x_train, y_train)
test
corpus_test = test["text"].map(preprocessing).values.tolist()
features_test = list(map(w2v, corpus_test))
x_test = []
for i, elem in enumerate(features_test):
if np.isnan(elem).any():
x_test.append(np.zeros(100))
else:
x_test.append(elem)
x_test = np.array(x_test)
predictions = clf.predict(x_test)
res = test
res["target"] = predictions
res = res.drop(columns={"keyword", "location", "text"})
res.to_csv("submission.csv", index=False)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("../input/suv-nanze/suv.csv")
df.drop("User ID", axis=1, inplace=True)
df.head(5)
df.Gender = pd.get_dummies(df.Gender, drop_first=True)
df.head()
X = df.to_numpy()
np.random.seed(0)
X = X[np.random.permutation(X.shape[0])]
y = X[:, -1]
X = X[:, :-1]
split = int(X.shape[0] * 0.8)
X_train = X[:split]
y_train = y[:split]
X_test = X[split:]
y_test = y[split:]
from sklearn.svm import SVC
# SVC == SVM classifier
# one of the parameters of SVC is C
# regularization strength = 1/C, so a high C means low regularization, which tends to overfit: the classifier cares too much about getting every point right
# in an SVM, overfitting means finding a boundary that classifies the training data with high accuracy while the margin no longer matters,
# whereas the best boundary may misclassify a few points but has the largest margin
# 'degree' sets the degree of the polynomial for the 'poly' kernel
# 'gamma' controls the width of the RBF kernel: a smaller gamma gives wider kernels, so points farther away still influence the boundary
# the defaults are C=1, kernel='rbf'; rbf is not always the best choice, and its extra flexibility can lead to overfitting
clf = SVC()
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
# the difference between the train and test scores is too high > change the training data > normalize the features
X = (X - X.mean(axis=0)) / X.std(axis=0)
split = int(X.shape[0] * 0.8)
X_train = X[:split]
y_train = y[:split]
X_test = X[split:]
y_test = y[split:]
clf = SVC()
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
"""
the train score is lower than the test score;
this can be due to the difficulty of this particular train split,
the noise in the data,
or the kind of regularization being applied
"""
# looks like underfitting > lower the regularization > raise C
clf = SVC(C=10000000, kernel="rbf")
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
clf = SVC(C=0.000001, kernel="rbf")
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
"""
rbf > conceptually adds one extra dimension (as in the usual picture)
in reality > the new feature space has as many dimensions as there are data points
linear > adds nothing
poly > adds polynomial-degree features
here the data are roughly linearly separable
for example > the rbf kernel may try to separate them with a circle-like boundary (!)
"""
clf = SVC(kernel="linear")
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
clf = SVC(kernel="poly")
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
clf.predict(X_test)
preds = clf.predict(X_test)
np.mean(preds == y_test)
# by default the model doesn't output probabilities
clf = SVC(kernel="poly", probability=True)
clf.fit(X_train, y_train)
print(clf.predict_proba(X_test))
"""
in logistic regression > the probability is calculated first > then a threshold is applied > then the class is assigned
in svm > the probability is not calculated first
"""
train_scores = []
test_scores = []
for c in np.arange(0.1, 10, 0.1):
clf = SVC(C=c, kernel="linear", max_iter=50)
clf.fit(X_train, y_train)
train_scores.append(clf.score(X_train, y_train))
test_scores.append(clf.score(X_test, y_test))
plt.plot(np.arange(0.1, 10, 0.1), np.array(train_scores), "b-")
plt.plot(np.arange(0.1, 10, 0.1), np.array(test_scores), "r-")
train_scores = []
test_scores = []
for c in np.arange(0.1, 10, 0.1):
clf = SVC(C=c, kernel="linear")
clf.fit(X_train, y_train)
train_scores.append(clf.score(X_train, y_train))
test_scores.append(clf.score(X_test, y_test))
plt.plot(np.arange(0.1, 10, 0.1), np.array(train_scores), "b-")
plt.plot(np.arange(0.1, 10, 0.1), np.array(test_scores), "r-")
plt.plot(np.arange(0.1, 10, 0.1), np.array(train_scores), "bo")
plt.plot(np.arange(0.1, 10, 0.1), np.array(test_scores), "ro")
# but when the C values are not close to each other (they span several orders of magnitude)
x = [0.01, 0.1, 1, 10, 1000]
train_scores = []
test_scores = []
for c in x:
clf = SVC(C=c, kernel="linear")
clf.fit(X_train, y_train)
train_scores.append(clf.score(X_train, y_train))
test_scores.append(clf.score(X_test, y_test))
plt.plot(x, np.array(train_scores), "bo")
plt.plot(x, np.array(test_scores), "ro")
plt.plot(x, np.array(train_scores), "b-")
plt.plot(x, np.array(test_scores), "r-")
# the x axis is on a log scale now
plt.xscale("log")
plt.plot(x, np.array(train_scores), "b-")
plt.plot(x, np.array(test_scores), "r-")
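# the manual sweeps above can also be done with a cross-validated grid search;
# a minimal hedged sketch, with a parameter grid chosen purely for illustration
from sklearn.model_selection import GridSearchCV
param_grid = {"C": [0.01, 0.1, 1, 10, 100], "kernel": ["linear", "rbf"]}
search = GridSearchCV(SVC(), param_grid, cv=5)
search.fit(X_train, y_train)
print(search.best_params_, search.best_score_)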
plt.plot(X[y == 1, 1], X[y == 1, 2], "ro")
plt.plot(X[y == 0, 1], X[y == 0, 2], "bo")
pred = clf.predict(X)
plt.plot(X[pred == 1, 1], X[pred == 1, 2], "ro")
plt.plot(X[pred == 0, 1], X[pred == 0, 2], "bo")
# this is the decision boundary that the svm learned
# the boundary does not look like a line because X has 3 features and we are only plotting 2 of them
|
import spacy
nlp = spacy.load("en")
f1 = open("../input/sunil-files/f1.txt", "r")
f1_contents = f1.read()
f11 = nlp(f1_contents)
f2 = open("../input/sunil-files/f2.txt", "r")
f2_contents = f2.read()
f22 = nlp(f2_contents)
print(f11)
print(f22)
print(f11.similarity(f22))
# # Problem Statement
# We humans have been using glass since ancient times for a variety of applications from building construction to making decorative objects. With technology, glass and its applications have evolved, and today, we have different varieties of glass used for very different purposes from a computer monitor to a bulletproof car window depending on the grade of the glass produced. And not all grades or varieties are manufactured the same way. In this data science challenge, you as a data scientist must use the given data to predict the grade of the glass produced based on the given factors.
# Given are 15 distinguishing factors that can provide insight into what grade of the glass is being produced. Your objective as a data scientist is to build a machine learning model that can predict the grade of glass based on the given factors.
# # Phase1: Model Building On Training Data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# # Step1: Read Data
train = pd.read_csv("../input/test-v123445/test.csv")
train.head()
df_new = train.iloc[:10]
df_new.to_csv("test.csv")
train.shape
train.columns
# # Step2: Exploratory Data Analysis
# Below are the steps involved to understand, clean and prepare your data for building your predictive model:
# 1. Variable Identification
# 2. Univariate Analysis
# 3. Bi-variate Analysis
# 4. Missing values treatment
# 5. Outlier treatment
# 6. Variable transformation
# 7. Variable creation
# ## 2.1 Missing Data Analysis
train.isnull().sum()
# ## 2.2 Data Type Analysis
train.dtypes
# ## 2.3 Univariate Analysis
# At this stage, we explore the variables one by one. The method used to perform univariate analysis depends on whether the variable type is categorical or continuous. Let's look at these methods and statistical measures for categorical and continuous variables individually:
# Continuous variables: in this case we need to understand the central tendency and spread of the variable. These are explored using summary statistics and plots such as histograms and box plots:
train.describe()
# ### 2.3.1 Box Plot of CONTINUOUS variables
train.columns
plt.figure(figsize=(16, 10))
plt.subplot(2, 2, 1)
train.boxplot(column=["ymin", "ymax"])
plt.subplot(2, 2, 2)
train.boxplot(column=["pixel_area", "log_area"])
plt.subplot(2, 2, 3)
train.boxplot(column=["max_luminosity", "thickness"])
plt.subplot(2, 2, 4)
train.boxplot(column=["xmin", "xmax"])
# From the plots we can see that there are lots of outliers in each variable.
# ### 2.3.2 Plot for Continuous variables
# ### 2.3.3 Histogram Plots Of Continuous Variables
plt.figure(figsize=(14, 8))
clr = ["red", "blue", "lime", "orange", "teal", "red", "blue", "lime"]
columns = [
"max_luminosity",
"thickness",
"xmin",
"xmax",
"ymin",
"ymax",
"pixel_area",
"log_area",
]
for i, j in zip(range(1, 9), columns):
plt.subplot(4, 2, i)
train[j].hist(color=clr[i - 1], label=j)
plt.legend()
# ### 2.3.4 Density Plots Of Continuous Variables
plt.figure(figsize=(14, 8))
train[columns].plot(
kind="density",
subplots=True,
layout=(4, 2),
sharex=False,
sharey=False,
figsize=(14, 6),
)
plt.show()
# ### 2.3.5 Discrete Variables Plot
train.columns
plt.figure(figsize=(14, 12))
plt.subplot(4, 2, 1)
train.grade_A_Component_1.value_counts().plot(kind="bar", label="grade_A_Component_1")
plt.legend()
plt.subplot(4, 2, 2)
train.grade_A_Component_2.value_counts().plot(kind="bar", label="grade_A_Component_2")
plt.legend()
plt.subplot(4, 2, 3)
train.x_component_1.value_counts().plot(kind="bar", label="x_component_1")
plt.legend()
plt.subplot(4, 2, 4)
train.x_component_2.value_counts().plot(kind="bar", label="x_component_2")
plt.legend()
plt.subplot(4, 2, 5)
train.x_component_3.value_counts().plot(kind="bar", label="x_component_3")
plt.legend()
plt.subplot(4, 2, 6)
train.x_component_4.value_counts().plot(kind="bar", label="x_component_4")
plt.legend()
plt.subplot(4, 2, 7)
train.x_component_4.value_counts().plot(kind="bar", label="x_component_4")
plt.legend()
# ### 2.3.6 Target Variable Plot
train["class"].value_counts().plot(kind="bar")
train["class"].value_counts()
# ## 2.4 Bi-variate Analysis
# Bi-variate analysis finds the relationship between two variables. Here, we look for association and dissociation between variables at a pre-defined significance level. We can perform bi-variate analysis for any combination of categorical and continuous variables: Categorical & Categorical, Categorical & Continuous, and Continuous & Continuous. Different methods are used for these combinations during the analysis process; a small categorical & continuous example is shown below.
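# A quick categorical & continuous sketch on this dataset (column names as used elsewhere
# in this notebook, e.g. the target "class" and the continuous "max_luminosity"):
train.boxplot(column="max_luminosity", by="class", figsize=(8, 5))
plt.suptitle("")
plt.title("max_luminosity by class")
plt.show()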
# ### 2.4.2 Scatterplot Matrix
import seaborn as sns
sns.set(style="ticks")
sns.pairplot(train)
# # Step4: Separating X and Y
train.columns
# ## 4.1 Re-setting Index Before Splitting
train.reset_index(drop=True, inplace=True)
# ## 4.2 Split Data
x = train.drop(["class"], axis=1)
y = train["class"]
x_copy = x.copy()
# # Step5: Creating Train and Test Set In Ratio 80:20
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=100
)
# # Step6: Model Building
# ## 6.1 Identification Of Best Features
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
# feature extraction
model = LogisticRegression(solver="lbfgs")
rfe = RFE(model, n_features_to_select=3)  # keyword argument required in recent scikit-learn
fit = rfe.fit(x, y)
print("Num Features: %d" % fit.n_features_)
print("Selected Features: %s" % fit.support_)
print("Feature Ranking: %s" % fit.ranking_)
df_feat = pd.DataFrame(fit.ranking_, x.columns)
df_feat.rename(columns={0: "Feature_Ranking"}, inplace=True)
df_feat.sort_values(by="Feature_Ranking").plot(kind="bar", figsize=(18, 7))
# ## 6.2 Importing and Model Fitting
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import (
roc_curve,
auc,
classification_report,
confusion_matrix,
accuracy_score,
roc_auc_score,
)
from sklearn.metrics import classification_report
# ### 6.2.2 Random Forest
from sklearn.ensemble import RandomForestClassifier
# making the instance
model = RandomForestClassifier(random_state=1234)
# Hyper Parameters Set
param_grid = {
"criterion": ["gini", "entropy"],
"n_estimators": [10, 15, 20, 25, 30],
"min_samples_leaf": [1, 2, 3],
"min_samples_split": [3, 4, 5, 6, 7],
"random_state": [123],
"n_jobs": [-1],
}
# Create grid search object
clf = GridSearchCV(model, param_grid=param_grid, n_jobs=-1, cv=10)
# Fit on data
best_clf_rf = clf.fit(X_train, y_train)
# Predict
predictions = best_clf_rf.predict(X_test)
# Check Prediction Score
print("Accuracy of Random Forest: ", accuracy_score(y_test, predictions))
# Print Classification Report
print("Confusion matrix \n", confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
# RF On Full data
# making the instance
model = RandomForestClassifier(random_state=1234)
# Hyper Parameters Set
param_grid = {
"criterion": ["gini", "entropy"],
"n_estimators": [10, 15, 20, 25, 30],
"min_samples_leaf": [1, 2, 3],
"min_samples_split": [3, 4, 5, 6, 7],
"random_state": [123],
"n_jobs": [-1],
}
# Create grid search object
clf = GridSearchCV(model, param_grid=param_grid, n_jobs=-1, cv=10)
# Fit on data
best_clf_rf1 = clf.fit(x, y)
# # Phase2: Applying Model On Test Data
test = pd.read_csv(
"../input/glass-quality-prediction/Glass_Quality_Participants_Data/Test.csv"
)
test.shape
test.head(5)
test.columns
test_for_prediction = test[
[
"grade_A_Component_1",
"grade_A_Component_2",
"max_luminosity",
"thickness",
"xmin",
"xmax",
"ymin",
"ymax",
"pixel_area",
"log_area",
"x_component_1",
"x_component_2",
"x_component_3",
"x_component_4",
"x_component_5",
]
]
# Define predict function
def predict_file(model, model_instance, test_data):
prediction_var = "prediction_from" + model
file_name = "Final_output_prediction_from_" + model + ".xlsx"
prediction_var = model_instance.predict_proba(test_data)
df_prediction_var = pd.DataFrame(prediction_var, columns=[1, 2])
df_prediction_var.to_excel(file_name)
print("{} created.".format(file_name))
predict_file("rf_classifier", best_clf_rf, test_for_prediction)
predict_file("rf1_classifier", best_clf_rf1, test_for_prediction)
|
# 
# # The Iris dataset is a classic machine learning dataset that is often used for classification problems. It is sometimes referred to as the "Hello World" of machine learning because it is simple, well understood, and widely used for teaching and learning.
# ## The dataset consists of 150 samples, each representing an iris flower. Each sample has four features: sepal length, sepal width, petal length, and petal width. The target variable is the species of the iris flower, which can be one of three classes: setosa, versicolor, and virginica.
# ### Here is a summary of the dataset:
# ### Number of samples: 150
# ### Number of features: 4
# ### Target variable: Species (setosa, versicolor, virginica)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
df = pd.read_csv("/kaggle/input/iris-classifier/iris.csv")
# Load the Iris dataset
iris = load_iris()
X = iris.data
y = iris.target
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Define the classification models
models = {
"Decision Tree": DecisionTreeClassifier(),
"Logistic Regression": LogisticRegression(),
"K-Nearest Neighbors": KNeighborsClassifier(),
"Support Vector Machine": SVC(kernel="linear"),
}
# Train and evaluate each model
results = {}
for name, model in models.items():
# Train the model
model.fit(X_train, y_train)
# Make predictions on the test set
y_pred = model.predict(X_test)
# Evaluate the model using accuracy score and classification report
accuracy = accuracy_score(y_test, y_pred)
report = classification_report(y_test, y_pred, output_dict=True)
# Add the evaluation metrics to the results dictionary
results[name] = {
"Accuracy": accuracy,
"Precision": report["macro avg"]["precision"],
"Recall": report["macro avg"]["recall"],
"F1-score": report["macro avg"]["f1-score"],
}
# Print the evaluation metrics for all models
print("Evaluation Metrics:")
print("---------------------------------------")
print(
"{:<25s}{:<10s}{:<10s}{:<10s}{:<10s}".format(
"Model", "Accuracy", "Precision", "Recall", "F1-score"
)
)
print("---------------------------------------")
for name, metrics in results.items():
print(
"{:<25s}{:<10.2f}{:<10.2f}{:<10.2f}{:<10.2f}".format(
name,
metrics["Accuracy"],
metrics["Precision"],
metrics["Recall"],
metrics["F1-score"],
)
)
print("---------------------------------------")
|
# # DataViz Challenge - Jean-Philippe Gabrielli
# ## Required libraries
# pip install pandas==1.5.3 numpy dask geopy matplotlib scipy folium ipywidgets plotly dash fastparquet
# Warning: do not install pandas 2, otherwise the web visualization with Dash will not work.
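# Optional guard (a sketch): warn early if pandas 2.x is installed, since the Dash
# visualization mentioned above is reported not to work with it.
import pandas as pd
if int(pd.__version__.split(".")[0]) >= 2:
    print("Warning: pandas", pd.__version__, "detected; the Dash part may not work.")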
# ## Importing the libraries
import pandas as pd
import numpy as np
from dask import dataframe as dd
from geopy.geocoders import Nominatim
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from scipy.spatial.distance import cdist
from scipy.stats import pearsonr
from folium import plugins
from folium.plugins import HeatMap
from scipy import signal
from geopy.extra.rate_limiter import RateLimiter
import folium
import ipywidgets as widgets
from IPython.display import display
from IPython.display import clear_output
from ipywidgets import interact
from numpy.fft import fftshift, fftfreq
import plotly.express as px
geolocator = Nominatim(user_agent="geoapiExercises")
# # Importing the weather data
"""
data2016 = dd.read_parquet('SE2016.parquet') # do not replace this with pandas: out-of-memory risk
data2017 = dd.read_parquet('SE2017.parquet')
data2018 = dd.read_parquet('SE2018.parquet')
"""
# #### Selecting the Corsican weather stations
"""
data_meteo2017 = data2017[(data2017.lon>8.35) & (data2017.lat<43.14)]
data_meteo2018 = data2018[(data2018.lon>8.35) & (data2018.lat<43.14)]
data_meteo2016 = data2016[(data2016.lon>8.35) & (data2016.lat<43.14)]
"""
# #### Merging the data
"""
data_meteo = dd.concat([data_meteo2016, data_meteo2017,data_meteo2018],axis=0)
data_meteo.t = data_meteo.t-273.15 # conversion to Celsius
"""
# #### Indexing all the weather stations
"""
coord_stations = data_meteo[['lat', 'lon']].drop_duplicates().compute() # warning: takes 2-3 minutes to run
pd.DataFrame(coord_stations)
coord_stations.to_parquet("coord_stations.parquet")
"""
coord_stations = pd.read_parquet(
"/kaggle/input/dataviz2023/datavizjp/coord_stations.parquet"
)
# #### Interpolation to hourly data
# The .parquet file is split into several files, one per weather station. This considerably reduces the data import time.
"""
for index, row in coord_stations.iterrows():
lat, lon = row['lat'], row['lon']
data_selec = data_meteo[(data_meteo['lat'] == lat) & (data_meteo['lon'] == lon)]
data_selec.to_parquet(f'data_meteo_{lat}_{lon}.parquet')
"""
# A linear interpolation is performed to always obtain hourly data from irregularly spaced observations with different time steps (see the toy illustration after the commented block below).
"""
for index, row in coord_stations.iterrows():
lat, lon = row['lat'], row['lon']
    data = pd.read_parquet(f'data_meteo_{lat}_{lon}.parquet') # pandas is fine here given the file sizes
data = data.set_index('date')
data = data.resample('H').interpolate()
data.to_parquet(f'data_meteo_horaire_{lat}_{lon}.parquet')
"""
# #### Importing the interpolated data
data_meteo_horaire = {}
for index, row in coord_stations.iterrows():
lat, lon = row["lat"], row["lon"]
file_name = f"data_meteo_horaire_{lat}_{lon}.parquet"
data_meteo_horaire[file_name] = pd.read_parquet(
f"/kaggle/input/dataviz2023/datavizjp/{file_name}"
)
data_meteo_horaire[file_name].columns = [
"number_sta",
"lat",
"lon",
"height_sta",
"Direction du vent",
"force du vent",
"précipitations",
"humidité",
"température de rosée",
"température",
"pression",
]
# #### Checking that the hourly time step was obtained by viewing the first 10 rows
data_meteo_horaire["data_meteo_horaire_42.887_9.44.parquet"].head(10)
# # Importing the electricity production data
"""
prod = pd.read_parquet('/kaggle/input/dataviz2023/datavizjp/production-delectricite-par-filiere.parquet') # pandas is sufficient here
"""
# #### Replacing negative estimated values with zero and converting universal time to the UTC+1 time zone
"""
prod['Date - Heure'] = pd.to_datetime(prod['Date - Heure'],utc = True)
prod['Date - Heure'] = prod['Date - Heure'].dt.tz_convert('Europe/Paris')
prod = prod.applymap(lambda x: 0 if isinstance(x, (int, float)) and not isinstance(x, (np.datetime64, pd.Timestamp)) and x < 0 else x)
"""
# The date is chosen as the reference index
"""
prod = prod.set_index('Date - Heure')
"""
"""
prod = prod[~prod.index.duplicated()] # remove duplicates
"""
"""
prod = prod.resample('H').interpolate() # hourly interpolation
prod.to_parquet('prod_horaire.parquet')
"""
prod_horaire = pd.read_parquet(
"/kaggle/input/dataviz2023/datavizjp/prod_horaire.parquet"
)
# #### Viewing the first 10 rows of the production data
prod_horaire.head(10)
# # Importing electricity consumption (MWh) by municipality and by year
"""
conso = pd.read_parquet('consommation-annuelle-par-commune1.parquet')
"""
# ### Adding the geographic coordinates of each municipality (latitude and longitude)
"""
communes = conso['Commune'].drop_duplicates()
lat =[]
lon= []
for i in communes:
lat.append(get_location_by_name(i)[0])
lon.append(get_location_by_name(i)[1])
"""
"""
f = pd.DataFrame({'lat': lat, 'lon': lon, 'commune': communes})
f = f.set_index('commune')
df_merged = pd.merge(conso, f, left_on='Commune', right_on='commune', how='left')
df_merged.to_csv('consolatlon.csv')
"""
conso = pd.read_csv("/kaggle/input/dataviz2023/datavizjp/consolatlon.csv")
# #### Viewing the first 10 municipalities
conso.head(10)
# ## Importing annual production by energy source (in MWh)
prod_an_fil = pd.read_csv(
"/kaggle/input/dataviz2023/datavizjp/production-annuelle-delectricite-par-filiere.csv",
sep=";",
index_col=0,
)
prod_an_fil = prod_an_fil.set_index(["Année"])
# #### Viewing the first years of annual production data
prod_an_fil.head(10)
# ## Working with the data
# ### Pearson coefficient
# #### The Pearson coefficient is a statistical tool for analyzing the linear relationship between two data series.
# This correlation coefficient ranges between -1 and 1, with 0 reflecting no linear relationship between the two series. A negative value means that when one series increases the other decreases, while a positive value indicates that the two series vary together in the same direction.
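# Tiny illustration on toy series (not the project data): a perfectly increasing
# relationship gives +1, a perfectly decreasing one gives -1.
s_up = pd.Series([1, 2, 3, 4, 5])
s_double = pd.Series([2, 4, 6, 8, 10])
s_down = pd.Series([10, 8, 6, 4, 2])
print(s_up.corr(s_double, method="pearson"))  # 1.0
print(s_up.corr(s_down, method="pearson"))  # -1.0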
# #### Computing the Pearson coefficient for all the weather data of each station
def pearson(data1, mode):
pearson = []
lieu = []
lat_list = []
lon_list = []
for index, row in coord_stations.iterrows():
lat, lon = row["lat"], row["lon"]
file_name = f"data_meteo_horaire_{lat}_{lon}.parquet"
x = data_meteo_horaire[file_name][data1]
t = data_meteo_horaire[file_name].index
y = prod_horaire.loc[t][mode]
pearson.append(x.corr(y, method="pearson"))
lieu.append(file_name)
lat_list.append(lat)
lon_list.append(lon)
df_pearson = pd.DataFrame(pearson, columns=["pearson"])
df_lieu = pd.DataFrame(lieu, columns=["lieu"])
df_lat = pd.DataFrame(lat_list, columns=["lat"])
df_lon = pd.DataFrame(lon_list, columns=["lon"])
coeff_pearson = pd.concat([df_pearson, df_lieu, df_lat, df_lon], axis=1)
return coeff_pearson
# On the map below, select the site whose Pearson coefficient you want to see, i.e. the correlation between the weather data and the production data.
# This can highlight regions where consumption is strongly correlated with temperature differences, for example.
# For instance, selecting wind force together with wind power production shows larger circles on Cap Corse, where the wind turbines are concentrated.
def carte_pearson(data1, mode):
coeff_pearson = pearson(data1, mode)
coeff_pearson["pearson"] = coeff_pearson["pearson"].fillna(0)
map = folium.Map(
location=[coeff_pearson["lat"].mean(), coeff_pearson["lon"].mean()],
zoom_start=8,
)
for index, row in coeff_pearson.iterrows():
folium.CircleMarker(
location=[row["lat"], row["lon"]],
radius=6 / (1 - abs(row["pearson"])),
color="red",
fill=True,
fill_color="black",
popup=f"Coefficient de Pearson: {round(row['pearson'], 2)}",
).add_to(map)
return map
data_meteo_horaire["data_meteo_horaire_42.887_9.44.parquet"] = data_meteo_horaire[
"data_meteo_horaire_42.887_9.44.parquet"
].drop(["number_sta", "lon", "lat", "height_sta"], axis=1)
prod_horaire = prod_horaire.drop(
["Territoire", "Statut", "Coût moyen de production (€/MWh)"], axis=1
)
@interact
def select_data_and_display_map(
data1=[
"Direction du vent",
"force du vent",
"précipitations",
"humidité",
"température de rosée",
"température",
"pression",
],
mode=prod_horaire.columns,
):
map = carte_pearson(data1, mode)
display(map)
# ## Studying the periodicity of the production time series using the Fourier transform (FFT)
# #### A peak appears whenever a significant periodic event is present in the data
# By applying the Fourier transform to the production data, we can visualize and interpret the population's consumption habits.
# As the chart below shows, there is one high-amplitude peak per day, corresponding to a daily increase in consumption in the evening. There is also a peak at a frequency of 2 per day, i.e. a half-day period, corresponding to higher consumption in both the morning and the evening.
# The observed phenomena can also be studied quantitatively: the amplitude of the daily periodicity is larger than that of the twice-daily periodicity.
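# Quick sanity check of this reading on synthetic data (not the production series): an
# hourly signal with a 24-hour period should show its main peak at 1 cycle per day.
n_hours = 24 * 30
t_idx = np.arange(n_hours)
synthetic = np.sin(2 * np.pi * t_idx / 24)
spectrum = np.abs(np.fft.fft(synthetic - synthetic.mean()))[: n_hours // 2]
freqs_cpd = fftfreq(n_hours, d=1)[: n_hours // 2] * 24
print("Dominant frequency (cycles/day):", freqs_cpd[np.argmax(spectrum)])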
def fourier(prod_data, column):
fourier = np.abs(np.fft.fft(prod_data[column] - np.mean(prod_data[column])))
fourier_shifted = fourier[: len(fourier) // 2]
freqs = fftfreq(len(fourier), d=1)[: len(fourier) // 2]
freqs_cycles_per_day = freqs * 24
fourier_fig = px.line(
x=freqs_cycles_per_day,
y=fourier_shifted,
title="Transformée de Fourier de la production d'électricité totale",
labels={"x": "Cycles par jour", "y": "amplitude du phénomène"},
)
return fourier_fig
fourier(prod_horaire, "Production totale (MW)")
# ## Visualizing consumption by municipality
def plot_conso(commune):
filtered_df = conso.loc[conso["Commune"] == commune]
grouped = filtered_df.groupby("Année")["Consommation (MWh)"].sum()
grouped.plot(kind="bar")
    # Fit a degree-4 polynomial trend curve to the yearly totals
z = np.polyfit(grouped.index, grouped.values, 4)
p = np.poly1d(z)
    # Plot the trend curve
plt.plot(p(grouped.index), "r--")
plt.ylabel("consommation totale (MWh)")
plt.show()
# We can plot the evolution of annual consumption for each municipality, for example here for Tomino
plot_conso("TOMINO")
# ### Visualizing the evolution of total annual electricity consumption by municipality
# Create a dropdown menu to select the municipality
commune_select = widgets.Dropdown(
options=conso["Commune"].unique(),
description="Commune:",
disabled=False,
)
# Function to update the chart when the selected municipality changes
def update_plot(change):
clear_output(wait=True)
display(commune_select)
plot_conso(change.new)
# Connect the dropdown menu to the update function
commune_select.observe(update_plot, names="value")
# Display the dropdown menu
display(commune_select)
# Display the initial chart
plot_conso(commune_select.value)
# ### Identifying the municipalities with the strongest growth in energy consumption over the last three years
def top_croissance(df, n):
    # Get the last three years present in the data
last_years = df["Année"].unique()[-3:]
    # Filter the data to those three years
data = df[df["Année"].isin(last_years)]
    # Compute the total consumption for each municipality and each year
consumption = (
data.groupby(["Commune", "Année"])["Consommation (MWh)"].sum().reset_index()
)
    # Pivot the table to get one column per year
consumption_pivot = consumption.pivot(
index="Commune", columns="Année", values="Consommation (MWh)"
)
    # Compute the consumption increase between the first and the last year
consumption_pivot["increase"] = (
consumption_pivot[last_years[-1]] - consumption_pivot[last_years[0]]
)
    # Sort the municipalities by consumption increase
top_communes = consumption_pivot.sort_values("increase", ascending=False).head(n)
return top_communes.index.tolist()
# An increase in energy consumption is a good indicator of a municipality's demographic and even economic growth. Below is the list of the 10 municipalities with the largest increase in energy consumption.
top_croissance(conso, 10)
# #### Visualizing the largest energy consumptions:
# Below, choose the desired year from the dropdown menu to see each municipality's consumption; the larger the circle, the higher the consumption.
def create_map(df, year):
    # Create a map centered on Corsica
map = folium.Map(location=[42.0396042, 9.0128926], zoom_start=9)
df = df.dropna(subset=["lat", "lon"])
    # Filter the data for the selected year
data = df[df["Année"] == year]
    # Add a circle for each municipality
for i, row in data.iterrows():
folium.Circle(
location=[row["lat"], row["lon"]],
radius=row["Consommation (MWh)"] / 10,
color="blue",
fill=True,
fill_color="blue",
popup=f"consommation en MWh: {round(row['Consommation (MWh)'])}",
).add_to(map)
    # Return the map
return map
# Create a dropdown menu to select the year
year_select = widgets.Dropdown(
options=conso["Année"].unique(),
description="Année:",
disabled=False,
)
def update_map(change):
clear_output(wait=True)
display(year_select)
map = create_map(conso, change.new)
display(map)
# Connect the dropdown menu to the update function
year_select.observe(update_map, names="value")
# Display the dropdown menu
display(year_select)
# Display the initial map
map = create_map(conso, year_select.value)
display(map)
# ## Visualizing electricity sources by year
prod_an_fil = prod_an_fil.fillna(0)
# Function to generate the pie chart
def pie_chart(year):
data = prod_an_fil.loc[year]
data = data.drop(["Total (MWh)"])
labels = data.index
values = data.values
fig, ax = plt.subplots()
ax.pie(values, labels=labels, autopct="%1.1f%%")
ax.set_title(f"Sources d'énergie électrique en {year}")
ax.legend(bbox_to_anchor=(1, 0.5), loc="center left")
plt.show()
# Dropdown menu to select the year
interact(pie_chart, year=prod_an_fil.index)
# Function to generate the bar chart
def bar_chart(year):
data = prod_an_fil.loc[year]
labels = data.index
values = data.values
fig, ax = plt.subplots()
ax.bar(labels, values)
ax.set_title(f"Sources d'énergie électrique en {year}")
plt.xticks(rotation=90)
plt.show()
# Dropdown menu to select the year
interact(bar_chart, year=prod_an_fil.index)
|
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# # Load the data
data = pd.read_csv("/kaggle/input/co2-emissions/CO2 Emissions.csv")
# # Explore the data
print(data.head())
# # Divide the data into the training and test datasets
X = data[
[
"Engine Size(L)",
"Cylinders",
"Fuel Consumption City (L/100 km)",
"Fuel Consumption Hwy (L/100 km)",
"Fuel Consumption Comb (L/100 km)",
"Fuel Consumption Comb (mpg)",
]
]
y = data["CO2 Emissions(g/km)"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
# # Normalize or scale the data
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# # Build the model
model = Sequential()
model.add(Dense(20, activation="relu", input_shape=(6,)))
model.add(Dense(10, activation="relu"))
model.add(Dense(1))
# # Train the model
model.compile(optimizer="adam", loss="mse", metrics=["mse", "mae"])
history = model.fit(X_train, y_train, epochs=100, batch_size=32, verbose=0)
# # Evaluate the quality of the trained model using test dataset
test_loss, test_mse, test_mae = model.evaluate(X_test, y_test, verbose=0)
print("Test MSE:", test_mse)
print("Test MAE:", test_mae)
# # Make predictions using the trained model
input_data = [[5.6, 8, 17.5, 12, 15, 19]]
scaled_input_data = scaler.transform(input_data)
prediction = model.predict(scaled_input_data)
print("CO2 Emissions: ", prediction[0][0])
|
# The main goal is to predict house prices using regression.
# Therefore, the task is to find the right features that help the model predict with less error.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## **1. Get The Data**
# Take a look at the data structure
housing = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
housing.head()
housing.info()
housing.describe()
housing.hist(bins=50, figsize=(20, 15))
plt.show()
# ### **Create a Test Set**
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
# ## **Discover and Visualize the Data to Gain Insights**
# - In this phase, we will copy the training dataset to play with.
# - Check for missing values
# - Look for correlations
# - Experiment with attribute combinations (see the short sketch below)
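# A small sketch of the "attribute combinations" step (assuming the usual column names of
# this dataset): combine the floor-area columns and check the correlation with SalePrice.
tmp = train_set.copy()
tmp["TotalSF"] = tmp["TotalBsmtSF"] + tmp["1stFlrSF"] + tmp["2ndFlrSF"]
print(tmp[["TotalSF", "GrLivArea", "SalePrice"]].corr()["SalePrice"])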
housing1 = train_set.copy()
s1 = housing1.dtypes
s1.groupby(s1).count()
housing1.isnull().sum().sort_values(ascending=False).head(30)
# The features PoolQC, MiscFeature, Alley and Fence have a lot of missing values.
# We'll drop these features from consideration.
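# Minimal sketch of that drop, kept in a separate variable so the missing-value heatmap
# below still shows the full frame (column names taken from the isnull() output above):
cols_to_drop = ["PoolQC", "MiscFeature", "Alley", "Fence"]
housing1_reduced = housing1.drop(columns=cols_to_drop)
housing1_reduced.isnull().sum().sort_values(ascending=False).head(10)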
housing1.head()
plt.figure(figsize=(12, 4))
sns.heatmap(housing1.isnull(), cbar=False, cmap="viridis", yticklabels=False)
plt.title("Missing value in the dataset")
corr = housing1.corr(numeric_only=True)  # restrict the correlation matrix to numeric columns
sns.heatmap(corr, linewidths=0.5)
corr["SalePrice"].sort_values(ascending=False)
# Checking the correlation with the SalePrice attribute.
# Several attributes have a positive correlation with SalePrice:
# OverallQual, GrLivArea, GarageArea, TotalBsmtSF, 1stFlrSF, FullBath, TotRmsAbvGrd, YearBuilt, YearRemodAdd, GarageYrBlt, MasVnrArea
from pandas.plotting import scatter_matrix
attributes = [
"SalePrice",
"OverallQual",
"GrLivArea",
"GarageArea",
"TotalBsmtSF",
"1stFlrSF",
"FullBath",
"TotRmsAbvGrd",
"YearBuilt",
"YearRemodAdd",
"GarageYrBlt",
"MasVnrArea",
]
pd.plotting.scatter_matrix(housing1[attributes], figsize=(15, 10))
|
# # Importing Libraries
#
import os
import spacy
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
# # Loading the Dataset
train_label_path = "/kaggle/input/facebook-hateful-meme-dataset/data/train.jsonl"
validation_label_path = "/kaggle/input/facebook-hateful-meme-dataset/data/dev.jsonl"
df_train = pd.read_json(train_label_path, lines=True)
df_val = pd.read_json(validation_label_path, lines=True)
df_train
df_val
df_train.head(4)
# # Checking null values
df_train.isna().sum()
df_train.shape
df_train.iloc[2]["text"]
nlp = spacy.load("en_core_web_sm")
nlp.pipe_names
# # Pie Chart for the distribution of data
import matplotlib.pyplot as plt
# Count the number of occurrences of each category in the training data
train_counts = df_train["label"].value_counts()
# Plot the counts as a pie chart
fig, ax = plt.subplots()
ax.pie(train_counts, labels=train_counts.index, autopct="%1.1f%%")
ax.set_title("Distribution of Categories in Training Data")
plt.show()
# # Text Preprocessing
df_train
# ### Filtering out other languages
from langdetect import detect
def filter_english(text):
"""
Function to remove non-English words from a text
"""
words = text.split()
english_words = [word for word in words if word.isalpha() and detect(word) == "en"]
return " ".join(english_words)
text = "Bonjour! Comment ça va? I'm doing well, thank you. What about you?"
filtered_text = filter_english(text)
print(filtered_text)
import re
from bs4 import BeautifulSoup
import contractions
import nltk
nltk.download("punkt")
# Define a function for text preprocessing
def preprocess_text(text):
# Convert text to lowercase
text = text.lower()
# Remove extra whitespaces
text = re.sub(r"\s+", " ", text)
# Remove HTML tags using BeautifulSoup
soup = BeautifulSoup(text, "html.parser")
text = soup.get_text()
# Remove URLs
text = re.sub(r"http\S+", "", text)
# Replace abbreviations with their full form using contractions package
text = contractions.fix(text)
# Remove punctuation
text = re.sub(r"[^\w\s]", "", text)
# Replace chat words with their full form
chat_words_map = {
"afaik": "as far as i know",
"b4": "before",
"b/c": "because",
"btw": "by the way",
"cya": "see you",
"lol": "laugh out loud",
"np": "no problem",
"omg": "oh my god",
"thx": "thanks",
"ttyl": "talk to you later",
"wtf": "what the fuck",
"yolo": "you only live once",
"brb": "be right back",
"rofl": "rolling on the floor laughing",
"gtg": "got to go",
"ex": "previous girlfriend",
"idk": "i do not know",
"imo": "in my opinion",
}
words = text.split()
new_words = []
for word in words:
if word.lower() in chat_words_map:
new_words.append(chat_words_map[word.lower()])
else:
new_words.append(word)
text = " ".join(new_words)
return text
# Apply the function to the text in the dataframe
df_train["text"] = df_train["text"].apply(preprocess_text)
df_train.iloc[1]["text"]
# ### Handling emoji
import emoji
def convert_emoji_to_text(text):
# Replace emojis with their textual representation
text = emoji.demojize(text, delimiters=(" ", " "))
# Remove the colons that separate emoji codepoints
text = text.replace(":", "")
text = text.replace("_", " ")
return text
df_train["text"] = df_train["text"].apply(convert_emoji_to_text)
text_with_emoji = "I'm feeling 😊 today!"
text_without_emoji = convert_emoji_to_text(text_with_emoji)
print(text_without_emoji)
df_train.iloc[1]["text"]
# ### Lemmatization
import spacy
# Load the English language model
nlp = spacy.load("en_core_web_sm")
# Define a function to lemmatize text using spaCy
def lemmatize_text(text):
doc = nlp(text)
lemmatized_words = [token.lemma_ for token in doc]
return " ".join(lemmatized_words)
# Apply the lemmatization function to the 'text' column of the dataframe
df_train["text"] = df_train["text"].apply(lemmatize_text)
# ### Tokenization
from nltk.tokenize import word_tokenize
df_train["tokenized_text"] = df_train["text"].apply(word_tokenize)
df_train
|
import pandas as pd
import numpy as np
data = pd.read_csv("/kaggle/input/kidneystone/train.csv")
data.info()
data.head()
data.isnull().sum()
corr = data.corr()
corr["target"].sort_values()
# x=data.drop(['target'],axis=1).values
# from sklearn.decomposition import PCA
# pca=PCA(n_components=1)
# x=pca.fit_transform(x)
x = data.drop(["ph", "id", "cond", "osmo", "target"], axis=1).values
y = data["target"]
# from sklearn.model_selection import train_test_split
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x = sc.fit_transform(x)
# x_train=sc.fit_transform(x_train)
# x_test=sc.fit_transform(x_test)
print(x)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(x, y)
y_pred = classifier.predict(x)
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, recall_score
cm = confusion_matrix(y, y_pred)
print(cm)
acc = accuracy_score(y, y_pred)
print(acc)
|
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import seaborn as sns
import gzip
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
test_file = "../input/avazu-ctr-prediction/test.gz"
samplesubmision_file = "../input/avazu-ctr-prediction/sampleSubmission.gz"
chunksize = 10**6
num_of_chunk = 0
train = pd.DataFrame()
for chunk in pd.read_csv("../input/avazu-ctr-train/train.csv", chunksize=chunksize):
num_of_chunk += 1
train = pd.concat(
[train, chunk.sample(frac=0.05, replace=False, random_state=123)], axis=0
)
print("Processing Chunk No. " + str(num_of_chunk))
train.reset_index(inplace=True)
train_len = len(train)
train_len
df = pd.concat([train, pd.read_csv(test_file, compression="gzip")]).drop(
["index", "id"], axis=1
)
def get_date(hour):
y = "20" + str(hour)[:2]
m = str(hour)[2:4]
d = str(hour)[4:6]
return y + "-" + m + "-" + d
df["weekday"] = pd.to_datetime(df.hour.apply(get_date)).dt.dayofweek.astype(str)
def tran_hour(x):
    # Map the last two digits of the timestamp (the hour of day) to a 2-hour bucket label
    x = x % 100
    if x in [23, 0]:
        return "23-01"
    if x in [1, 2]:
        return "01-03"
    if x in [3, 4]:
        return "03-05"
    if x in [5, 6]:
        return "05-07"
    if x in [7, 8]:
        return "07-09"
    if x in [9, 10]:
        return "09-11"
    if x in [11, 12]:
        return "11-13"
    if x in [13, 14]:
        return "13-15"
    if x in [15, 16]:
        return "15-17"
    if x in [17, 18]:
        return "17-19"
    if x in [19, 20]:
        return "19-21"
    if x in [21, 22]:
        return "21-23"
df["hour"] = df.hour.apply(tran_hour)
df.info()
len_of_feature_count = []
for i in df.columns[2:23].tolist():
print(i, ":", len(df[i].astype(str).value_counts()))
len_of_feature_count.append(len(df[i].astype(str).value_counts()))
need_tran_feature = df.columns[2:4].tolist() + df.columns[13:23].tolist()
for i in need_tran_feature:
df[i] = df[i].astype(str)
obj_features = []
for i in range(len(len_of_feature_count)):
if len_of_feature_count[i] > 10:
obj_features.append(df.columns[2:23].tolist()[i])
obj_features
df_describe = df.describe()
df_describe
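# obj_clean (below) bins a high-cardinality categorical feature by its empirical click rate:
# for the most frequent values (up to 1000) it computes the click rate in the training
# sample, then maps each value to one of five buckets ("very_high", "higher", "mid",
# "lower", "very_low") depending on how far that rate sits from the overall mean click
# rate (thresholds at +/- 0.02 and +/- 0.04).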
def obj_clean(X):
def get_click_rate(x):
temp = train[train[X.columns[0]] == x]
res = round((temp.click.sum() / temp.click.count()), 3)
return res
def get_type(V, str):
very_high = df_describe.loc["mean", "click"] + 0.04
higher = df_describe.loc["mean", "click"] + 0.02
lower = df_describe.loc["mean", "click"] - 0.02
very_low = df_describe.loc["mean", "click"] - 0.04
vh_type = V[V[str] > very_high].index.tolist()
hr_type = V[(V[str] > higher) & (V[str] < very_high)].index.tolist()
vl_type = V[V[str] < very_low].index.tolist()
lr_type = V[(V[str] < lower) & (V[str] > very_low)].index.tolist()
return vh_type, hr_type, vl_type, lr_type
    def clean_function(x):
        if x in type_[0]:
            return "very_high"
        if x in type_[1]:
            return "higher"
        if x in type_[2]:
            return "very_low"
        if x in type_[3]:
            return "lower"
        return "mid"
print("Run: ", X.columns[0])
fq = X[X.columns[0]].value_counts()
if len(fq) > 1000:
fq = fq[:1000]
fq = pd.DataFrame(fq)
fq["new_column"] = fq.index
fq["click_rate"] = fq.new_column.apply(get_click_rate)
type_ = get_type(fq, "click_rate")
return X[X.columns[0]].apply(clean_function)
for i in obj_features:
df[[i]] = obj_clean(df[[i]])
df
for i in df.columns:
sns.countplot(x=i, hue="click", data=df)
plt.show()
df.drop(["device_id", "C14", "C17", "C19", "C20", "C21"], axis=1, inplace=True)
df = pd.get_dummies(df)
train = df[:train_len]
test = df[train_len:]
del df
pre_X = train[train["click"] == 0].sample(
n=len(train[train["click"] == 1]), random_state=111
)
pre_X = pd.concat([pre_X, train[train["click"] == 1]]).sample(frac=1)
pre_y = pre_X[["click"]]
pre_X.drop(["click"], axis=1, inplace=True)
test.drop(["click"], axis=1, inplace=True)
pre_X_train, pre_X_test, pre_y_train, pre_y_test = train_test_split(
pre_X, pre_y, test_size=0.20, stratify=pre_y, random_state=1
)
params = {"criterion": ["gini", "entropy"], "max_depth": range(1, 20)}
grid_search = GridSearchCV(
DecisionTreeClassifier(),
param_grid=params,
scoring="roc_auc",
cv=100,
verbose=1,
n_jobs=-1,
)
grid_search.fit(pre_X_train, pre_y_train)
grid_search.best_score_, grid_search.best_estimator_, grid_search.best_params_
tree = grid_search.best_estimator_
tree.fit(pre_X, pre_y)
feature_importances = pd.DataFrame(tree.feature_importances_)
feature_importances.index = pre_X_train.columns
feature_importances = feature_importances.sort_values(0, ascending=False)
feature_importances
pre_X_train = pre_X_train[
feature_importances.index[: int(len(feature_importances) / 3)]
]
pre_X_test = pre_X_test[feature_importances.index[: int(len(feature_importances) / 3)]]
params = {"criterion": ["gini", "entropy"], "max_depth": range(1, 12)}
grid_search = GridSearchCV(
DecisionTreeClassifier(),
param_grid=params,
scoring="roc_auc",
cv=100,
verbose=1,
n_jobs=-1,
)
grid_search.fit(pre_X_train, pre_y_train)
grid_search.best_score_, grid_search.best_estimator_, grid_search.best_params_
pre_X = pre_X[feature_importances.index[: int(len(feature_importances) / 3)]]
tree = grid_search.best_estimator_
tree.fit(pre_X, pre_y)
feature_importances = pd.DataFrame(tree.feature_importances_)
feature_importances.index = pre_X_train.columns
feature_importances = feature_importances.sort_values(0, ascending=False)
feature_importances
feature_len = len(
feature_importances[feature_importances[feature_importances.columns[0]] > 0.005]
)
y = train[["click"]]
X = train[feature_importances[:feature_len].index]
test = test[feature_importances[:feature_len].index]
model = XGBClassifier(tree_method="gpu_hist", n_jobs=-1, n_estimators=500, max_depth=11)
model.fit(X, y.values.ravel())
y_pred = model.predict(X)
print("Roc_auc_score: ", roc_auc_score(y, y_pred) * 100, "%")
confmat = confusion_matrix(y_true=y, y_pred=y_pred, labels=[0, 1])
fig, ax = plt.subplots(figsize=(2.5, 2.5))
ax.matshow(confmat, cmap=plt.cm.Blues, alpha=0.3)
for i in range(confmat.shape[0]):
for j in range(confmat.shape[1]):
ax.text(x=j, y=i, s=confmat[i, j], va="center", ha="center")
plt.xlabel("Predicted label")
plt.ylabel("True label")
plt.tight_layout()
plt.show()
import seaborn as sns
sns.heatmap(
confmat,
annot=True,
fmt="",
xticklabels=[0, 1],
yticklabels=[0, 1],
linewidth=0.5,
cmap=sns.cubehelix_palette(as_cmap=True),
)
plt.xlabel("Predicted label", fontsize=15)
plt.ylabel("True label", fontsize=15)
plt.title("Confusion Matrix", fontsize=20)
plt.figure(figsize=(30.0, 30.0), dpi=200)
plt.tight_layout()
plt.show()
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
# Plot ROC curve for each model
fpr3, tpr3, _ = roc_curve(y, y_pred)
roc_auc3 = auc(fpr3, tpr3)
plt.figure(figsize=(10, 7))
plt.plot(fpr3, tpr3, color="blue", lw=2, label="XGBoost (AUC = %0.2f)" % roc_auc3)
plt.plot([0, 1], [0, 1], color="navy", lw=2, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate", fontsize=15)
plt.ylabel("True Positive Rate", fontsize=15)
plt.title("Receiver Operating Characteristic (ROC) Curve", fontsize=20)
plt.legend(loc="lower right")
plt.show()
import matplotlib.pyplot as plt
import numpy
from sklearn import metrics
from sklearn.metrics import precision_recall_fscore_support
print(precision_recall_fscore_support(y, y_pred, average="macro"))
cm = confusion_matrix(y, y_pred)
total1 = sum(sum(cm))
accuracy1 = (cm[0, 0] + cm[1, 1]) / total1
print("Accuracy : ", accuracy1)
sensitivity1 = cm[0, 0] / (cm[0, 0] + cm[0, 1])
print("Sensitivity : ", sensitivity1)
specificity1 = cm[1, 1] / (cm[1, 0] + cm[1, 1])
print("Specificity : ", specificity1)
F1_score = metrics.f1_score(y, y_pred)
print("F1 Score : ", F1_score)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import tensorflow as tf
import numpy as np
import time, re
def gumbel_softmax(o_t, temperature, eps=1e-20):
"""Sample from Gumbel(0, 1)"""
u = tf.random.uniform(tf.shape(o_t), minval=0, maxval=1)
g_t = -tf.math.log(-tf.math.log(u + eps) + eps)
gumbel_t = tf.math.add(o_t, g_t)
return tf.math.multiply(gumbel_t, temperature)
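# Small usage sketch (toy logits): the helper above adds Gumbel(0, 1) noise to the logits
# and scales the result by `temperature`; a softmax over the output then behaves like a
# sampled distribution. Note this is the behaviour as written here, not the textbook
# Gumbel-softmax (which divides by the temperature inside the softmax).
toy_logits = tf.constant([[2.0, 0.5, -1.0]])
perturbed = gumbel_softmax(toy_logits, temperature=0.5)
print(tf.nn.softmax(perturbed).numpy())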
# ========================================================================================
# Sequence to Sequence
# Source: https://www.tensorflow.org/text/tutorials/nmt_with_attention
# ========================================================================================
class Seq2Seq:
def __init__(
self,
inp_lang,
targ_lang,
max_length,
vocab_inp_size,
vocab_tar_size,
batch_size=64,
embedding_dim=256,
units=1024,
loss_object=None,
optimizer=None,
w_atten=False,
gumbel=False,
gumbel_temp=0.5,
bpe=False,
):
self.BATCH_SIZE = batch_size
self.embedding_dim = embedding_dim
self.units = units
self.w_atten = w_atten
self.gumbel = gumbel
self.gumbel_temp = gumbel_temp
self.bpe = bpe
self.inp_lang = inp_lang
self.targ_lang = targ_lang
self.max_length_inp, self.max_length_targ = max_length
self.vocab_inp_size = vocab_inp_size
self.vocab_tar_size = vocab_tar_size
if self.bpe:
self.targ_lang_start_idx = self.targ_lang.tokenize("<start>")
self.targ_lang_start_idx = self.targ_lang_start_idx.numpy()[0]
self.targ_lang_end_idx = self.targ_lang.tokenize("<end>")
self.targ_lang_end_idx = tf.cast(self.targ_lang_end_idx, tf.int32)
else:
self.targ_lang_start_idx = self.targ_lang.word_index["<start>"]
if loss_object is not None:
self.loss_object = loss_object
else:
self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True
)
if optimizer is not None:
self.optimizer = optimizer
else:
self.optimizer = tf.keras.optimizers.RMSprop()
self.encoder = Seq2Seq.Encoder(
self.vocab_inp_size, self.embedding_dim, self.units, self.BATCH_SIZE
)
self.decoder = Seq2Seq.Decoder(
self.vocab_tar_size,
self.embedding_dim,
self.units,
self.BATCH_SIZE,
self.w_atten,
self.gumbel,
self.gumbel_temp,
)
def loss_function(self, real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = self.loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
@tf.function
def train_step(self, inp, targ, enc_hidden):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = self.encoder(inp, enc_hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([self.targ_lang_start_idx] * self.BATCH_SIZE, 1)
for t in range(1, targ.shape[1]):
if self.w_atten:
predictions, dec_hidden, _ = self.decoder(
dec_input, dec_hidden, enc_output
)
else:
predictions, dec_hidden = self.decoder(
dec_input, dec_hidden, enc_output
)
loss += self.loss_function(targ[:, t], predictions)
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = loss / int(targ.shape[1])
variables = self.encoder.trainable_variables + self.decoder.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return batch_loss
def train(self, dataset, steps_per_epoch, epochs):
for epoch in range(epochs):
start = time.time()
enc_hidden = self.encoder.initialize_hidden_state()
total_loss = 0
for batch, (inp, targ) in enumerate(dataset.take(steps_per_epoch)):
batch_loss = self.train_step(inp, targ, enc_hidden)
total_loss += batch_loss
if batch % 100 == 0:
print(
"Epoch {} Batch {} Loss {:.4f}".format(
epoch + 1, batch, batch_loss.numpy()
)
)
print(
"Epoch {} Loss {:.4f}".format(epoch + 1, total_loss / steps_per_epoch)
)
print("Time taken for 1 epoch {} sec\n".format(time.time() - start))
def evaluate(self, sentence):
# attention_plot = np.zeros((max_length_targ, max_length_inp))
sentence = preprocess_sentence(sentence)
if self.bpe:
inputs = [
self.inp_lang.tokenize(i)
if i != 0
else self.inp_lang.tokenize("<unkown>")
for i in sentence.split(" ")
]
inputs = tf.concat(inputs, 0)
enc_input = tf.expand_dims(inputs, 0)
dec_input = self.targ_lang.tokenize("<start>")
# dec_input = tf.concat(dec_input, 0)
dec_input = tf.expand_dims(dec_input, 0)
else:
inputs = [
self.inp_lang.word_index[i]
if i in self.inp_lang.word_index
else self.inp_lang.word_index["<unkown>"]
for i in sentence.split(" ")
]
inputs = tf.keras.preprocessing.sequence.pad_sequences(
[inputs], maxlen=self.max_length_inp, padding="post"
)
enc_input = tf.convert_to_tensor(inputs)
dec_input = tf.expand_dims([self.targ_lang.word_index["<start>"]], 0)
result = ""
hidden = [tf.zeros((1, self.units))]
enc_out, enc_hidden = self.encoder(enc_input, hidden)
dec_hidden = enc_hidden
for t in range(self.max_length_targ):
if self.w_atten:
predictions, dec_hidden, _ = self.decoder(
dec_input, dec_hidden, enc_out
)
predicted_id = tf.argmax(predictions[0]).numpy()
else:
predictions, dec_hidden = self.decoder(dec_input, dec_hidden, enc_out)
predicted_id = tf.argmax(predictions[0][0]).numpy()
if predicted_id != 0:
if self.bpe:
predicted_word = self.targ_lang.detokenize([[predicted_id]])
predicted_word = predicted_word.numpy()[0, 0].decode("utf-8")
result += predicted_word + " "
else:
result += self.targ_lang.index_word[predicted_id] + " "
if (self.bpe and tf.equal(predicted_id, self.targ_lang_end_idx)) or (
not self.bpe and self.targ_lang.index_word[predicted_id] == "<end>"
):
return result, sentence
dec_input = tf.expand_dims([predicted_id], 0)
result = re.sub(" ##", "", result) if self.bpe else result
return result, sentence
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Seq2Seq.Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(
self.enc_units,
return_sequences=True,
return_state=True,
recurrent_initializer="glorot_uniform",
)
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state=hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
class Decoder(tf.keras.Model):
def __init__(
self,
vocab_size,
embedding_dim,
dec_units,
batch_sz,
w_atten=False,
gumbel=False,
gumbel_temp=0.5,
):
super(Seq2Seq.Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.w_atten = w_atten
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(
self.dec_units,
return_sequences=True,
return_state=True,
recurrent_initializer="glorot_uniform",
)
if gumbel:
self.fc = tf.keras.layers.Dense(
vocab_size,
activation=(lambda x: gumbel_softmax(x, temperature=gumbel_temp)),
)
else:
self.fc = tf.keras.layers.Dense(vocab_size)
if self.w_atten:
self.attention = Seq2Seq.BahdanauAttention(self.dec_units)
def call(self, x, hidden, enc_output):
if self.w_atten:
context_vector, attention_weights = self.attention(hidden, enc_output)
x = self.embedding(x)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
output, state = self.gru(x)
output = tf.reshape(output, (-1, output.shape[2]))
x = self.fc(output)
return x, state, attention_weights
else:
x = self.embedding(x)
output, state = self.gru(x, hidden)
x = self.fc(output)
return x, state
class BahdanauAttention(tf.keras.Model):
def __init__(self, units):
super(Seq2Seq.BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, query, values):
hidden_with_time_axis = tf.expand_dims(query, 1)
score = self.V(tf.nn.tanh(self.W1(values) + self.W2(hidden_with_time_axis)))
attention_weights = tf.nn.softmax(score, axis=1)
context_vector = attention_weights * values
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
import unicodedata, re, os, io
import tensorflow as tf
import numpy as np
def preprocess_sentence(w, add_token=True):
w = re.sub(r"([?.!,'¿])", r" \1 ", w)
w = re.sub(r'[" "]+', " ", w)
w = re.sub(r"[^a-zA-Z?'.!,¿ıüçğşö]+", " ", w)
w = w.rstrip().strip()
if add_token:
w = "<start> " + w + " <end>"
return w
def create_dataset(path, num_examples, add_token=True):
lines = io.open(path, encoding="UTF-8").read().strip().split("\n")
word_pairs = [
[preprocess_sentence(w, add_token) for w in l.split("\t")]
for l in lines[:num_examples]
]
return zip(*word_pairs)
def max_length(tensor):
return max(len(t) for t in tensor)
def add_unknown_to_vocab(lang):
index_word, word_index = {1: "<unkown>"}, {"<unkown>": 1}
for i in lang.index_word:
index_word[i + 1] = lang.index_word[i]
word_index[lang.index_word[i]] = i + 1
return index_word, word_index
def remove_words(tokenizer, num_words):
tokenizer.index_word = {
k: tokenizer.index_word[k] for k in list(tokenizer.index_word)[:num_words]
}
tokenizer.word_index = {
k: tokenizer.word_index[k] for k in list(tokenizer.word_index)[:num_words]
}
return tokenizer
def tokenize(lang, maxlen, num_words):
lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters="")
lang_tokenizer.fit_on_texts(lang)
lang_tokenizer.index_word, lang_tokenizer.word_index = add_unknown_to_vocab(
lang_tokenizer
)
lang_tokenizer = remove_words(lang_tokenizer, num_words)
tensor = lang_tokenizer.texts_to_sequences(lang)
tensor = tf.keras.preprocessing.sequence.pad_sequences(
tensor, padding="post", maxlen=maxlen
)
return tensor, lang_tokenizer
def load_dataset(path, num_examples=None, maxlen=50, num_words=None):
inp_lang, targ_lang = create_dataset(path, num_examples)
input_tensor, inp_lang_tokenizer = tokenize(inp_lang, maxlen, num_words)
target_tensor, targ_lang_tokenizer = tokenize(targ_lang, maxlen, num_words)
return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer
import numpy as np
from sklearn.model_selection import train_test_split
# Load the dataset
input_tensor, target_tensor, inp_lang, targ_lang = load_dataset(
"/kaggle/input/cheryl-news-ted/newted_en_fr.txt",
num_examples=130000,
maxlen=50,
num_words=40000,
)
# Split into training, validation, and test sets
train_size = 120000
val_size = 5000
input_tensor_train = input_tensor[:train_size]
target_tensor_train = target_tensor[:train_size]
input_tensor_val = input_tensor[train_size : train_size + val_size]
target_tensor_val = target_tensor[train_size : train_size + val_size]
input_tensor_test = input_tensor[train_size + val_size :]
target_tensor_test = target_tensor[train_size + val_size :]
max_length_inp = max_length(input_tensor)
max_length_targ = max_length(target_tensor)
vocab_inp_size = len(inp_lang.word_index) + 1
vocab_tar_size = len(targ_lang.word_index) + 1
# Hyperparameter settings
BATCH_SIZE = 64
embedding_dim = 256
units = 1024
epochs = 10
# Create the dataset
dataset = tf.data.Dataset.from_tensor_slices(
(input_tensor_train, target_tensor_train)
).shuffle(len(input_tensor_train))
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
steps_per_epoch = len(input_tensor_train) // BATCH_SIZE
# Initialize the model
model = Seq2Seq(
inp_lang,
targ_lang,
(max_length_inp, max_length_targ),
vocab_inp_size,
vocab_tar_size,
batch_size=BATCH_SIZE,
embedding_dim=embedding_dim,
units=units,
loss_object=None,
optimizer=None,
w_atten=False,
gumbel=False,
gumbel_temp=0.5,
bpe=False,
)
# Train the model
model.train(dataset, steps_per_epoch, epochs)
# Then save the trained model weights
encoder_weights_path = "encoder_path"  # choose a suitable folder to save the model
model.encoder.save_weights(encoder_weights_path)
model.decoder.save_weights("decoder_path")
num_examples = 20
for i in range(num_examples):
source_sentence = " ".join(
[inp_lang.index_word[t] for t in input_tensor_test[i] if t not in [0, 1, 2]]
)
target_sentence = " ".join(
[targ_lang.index_word[t] for t in target_tensor_test[i] if t not in [0, 1, 2]]
)
predicted_sentence, _ = model.evaluate(source_sentence)
print(f"Source Sentence: {source_sentence}")
print(f"Target Sentence: {target_sentence}")
print(f"Predicted Sentence: {predicted_sentence}\n")
from collections import defaultdict
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
def calculate_bleu_score(references, candidate):
smoothie = SmoothingFunction().method4
score = sentence_bleu(references, candidate, smoothing_function=smoothie)
return score
# Compute BLEU scores on the test set
bleu_scores_by_length = defaultdict(list)
sentence_count_by_length = defaultdict(int)
# Initialize the accumulators outside the loop
total_bleu_score = 0
total_sentence_count = 0
kk = len(input_tensor_test)
# kk=50
for i in range(kk):
input_sentence = " ".join(
[inp_lang.index_word[idx] for idx in input_tensor_test[i] if idx != 0]
)
target_sentence = " ".join(
[targ_lang.index_word[idx] for idx in target_tensor_test[i] if idx != 0]
)
predicted_sentence, _ = model.evaluate(input_sentence)
predicted_sentence = predicted_sentence.strip().split(" ")
target_sentence = (
target_sentence.replace("<start>", "").replace("<end>", "").strip().split(" ")
)
sentence_length = len(target_sentence)
if sentence_length <= 60:
current_bleu_score = calculate_bleu_score([target_sentence], predicted_sentence)
bleu_scores_by_length[sentence_length].append(
calculate_bleu_score([target_sentence], predicted_sentence)
)
sentence_count_by_length[sentence_length] += 1
        # Accumulate the BLEU score and the sentence count
total_bleu_score += current_bleu_score
total_sentence_count += 1
# Compute the average BLEU score over the whole dataset
average_bleu_score = total_bleu_score / total_sentence_count
# Print the results
print("Length | Avg BLEU Score | Sentence Count")
print("-----------------------------------------")
for length in range(1, 61):
if length in bleu_scores_by_length:
avg_bleu_score = np.mean(bleu_scores_by_length[length])
count = sentence_count_by_length[length]
print(f"{length:5} | {avg_bleu_score:13.4f} | {count:13}")
print("\nOverall Average BLEU Score: {:.4f}".format(average_bleu_score))
predicted_sentence, _ = model.evaluate("this is my job")
print(predicted_sentence)
predicted_sentence, _ = model.evaluate(
"et nous savons en fait quelque chose sur la manière dont le cerveau y parvient."
)
print(predicted_sentence)
predicted_sentence, _ = model.evaluate(
"Aujourd'hui, je suis allée dans un restaurant chinois où le canard rôti était très bon."
)
print(predicted_sentence)
predicted_sentence, _ = model.evaluate("Enchanté de vous rencontrer")
print(predicted_sentence)
predicted_sentence, _ = model.evaluate("Je n'aime pas cette nourriture.")
print(predicted_sentence)
predicted_sentence, _ = model.evaluate("Enchanté de vous rencontrer")
print(predicted_sentence)
predicted_sentence, _ = model.evaluate(
"La vie est courte et précieuse, nous devons saisir toutes les occasions de poursuivre ce que nous voulons vraiment, et persévérer quelles que soient les difficultés que nous rencontrons, car c'est seulement de cette manière que nous pouvons réaliser notre valeur personnelle."
)
print(predicted_sentence)
predicted_sentence, _ = model.evaluate(
"La chose la plus importante dans la vie est de trouver son but, de poursuivre ses passions intérieures et les choses qui vous font vraiment sentir épanoui."
)
print(predicted_sentence)
|
# # cover
# TABLE OF CONTENTS
# ---
# 1 INTRODUCTION & JUSTIFICATION
# ---
# 2 BACKGROUND INFORMATION
# ---
# 3 IMPORTS
# ---
# 4 SETUP & HELPER FUNCTIONS
# ---
# 5 EXPLORATORY DATA ANALYSIS
# ---
# 6 BASELINE
# ---
# 7 NEXT STEPS
# 1 INTRODUCTION & JUSTIFICATION ⤒
# 1.1 WHAT IS THIS?
# ---
# * This notebook will follow the author's learning path and highlight useful content about the competition
# * This notebook will conduct an Exploratory Data Analysis for the competition
# * This notebook will propose an open-source baseline solution
# 1.2 WHY IS THIS?
# ---
# * Writing and sharing my learning path and the resulting exploratory data analysis can help improve my own understanding of the competition and the data.
# * Sharing my work may help others who are interested in the competition (or the data). This help may take the form of:
# * better understanding the problem and potential common solutions (incl. my baseline)
# * better understanding of the provided dataset
# * better understanding of the background information and research
# * better ability to hypothesize new solutions
# * Exploratory data analysis is a critical step in any data science project. Sharing my EDA might help others in the competition.
# * Writing and sharing my work is often a fun and rewarding experience! It not only allows me to explore and try different techniques, ideas and visualizations... but it also encourages and supports other learners and participants.
# 1.4 HOW WILL THIS WORK?
# ---
# I'm going to assemble some markdown cells (like this one) at the beginning of the notebook to go over some concepts/details/etc.
# Following this, I will attempt to walk through the data and understand it better prior to composing a baseline solution
# 3 IMPORTS ⤒
print("\n... IMPORTS STARTING ...\n")
# Machine Learning and Data Science Imports (basics)
import pandas as pd
print(f"\t– PANDAS VERSION: {pd.__version__}")
import numpy as np
print(f"\t– NUMPY VERSION: {np.__version__}")
# Built-In Imports (mostly don't worry about these)
import os
# Visualization Imports (overkill)
import matplotlib.pyplot as plt
print("\n... IMPORTS COMPLETE ...\n")
|
# # Introduction
# This notebook is part of a series on using Deep Learning to implement Recommender Systems.
# The dataset used is Movielens.
# GitHub project source: https://github.com/shawnhan108/The-Recommenders/
# Paper: https://www.researchgate.net/publication/340416554_Deep_Learning_Architecture_for_Collaborative_Filtering_Recommender_Systems
# # Analysis preparation
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import random
import os
from sklearn.utils import shuffle
from keras.layers import (
Input,
Embedding,
Dense,
Flatten,
Concatenate,
Add,
Dot,
Dropout,
BatchNormalization,
Activation,
)
from keras.models import Model
from keras.optimizers import Adam, SGD
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from movie_lens_data import MovieLensData
# # Read the data
path = "/kaggle/input/movielens-100k-dataset/ml-100k"
movie_lens_data = MovieLensData(
users_path=os.path.join(path, "u.user"),
ratings_path=os.path.join(path, "u.data"),
movies_path=os.path.join(path, "u.item"),
genre_path=os.path.join(path, "u.genre"),
)
evaluation_data = movie_lens_data.read_ratings_data()
movie_data = movie_lens_data.read_movies_data()
popularity_rankings = movie_lens_data.get_popularity_ranks()
ratings = movie_lens_data.get_ratings()
# # Prepare the algorithm
def train_test_split(ratings):
movie_df = ratings
movie_df = shuffle(movie_df)
train_size = int(0.75 * movie_df.shape[0])
train_df = movie_df.iloc[:train_size]
test_df = movie_df.iloc[train_size:]
return movie_df, train_df, test_df
movie_df, train_df, test_df = train_test_split(ratings)
user_num = movie_df.user_id.max() + 1
movie_num = movie_df.movie_id.max() + 1
K = 100
MU = train_df.rating.mean()
EPOCHS = 15
REGULAR = 0.1
def build_model(user_num, movie_num):
# input
user_input = Input(shape=(1,), name="user_input")
movie_input = Input(shape=(1,), name="movie_input")
# Matrix Factorization
# Since user_num < movie_num, we have user_num = batchsize
user_embedding = Embedding(user_num, K, name="user_embedding")(
user_input
) # bsize x 1 x k
movie_embedding = Embedding(movie_num, K, name="movie_embedding")(
movie_input
) # bsize x 1 x k
user_bias = Embedding(user_num, 1, name="user_bias")(user_input) # bsize x 1 x 1
movie_bias = Embedding(movie_num, 1, name="movie_bias")(
movie_input
) # bsize x 1 x 1
model = Dot(axes=2)([user_embedding, movie_embedding]) # bsize x 1 x 1
model = Add()([model, user_bias, movie_bias])
model = Flatten()(model) # bsize x 1
# Residual
user_embedding_flat = Flatten(name="flt_user_emb")(user_embedding) # bsize x k
movie_embedding_flat = Flatten(name="flt_movie_emb")(movie_embedding) # bsize x k
residual = Concatenate(name="concatenate_user_movie")(
[user_embedding_flat, movie_embedding_flat]
) # bsize x 2k
residual = Dense(512)(residual)
residual = Dropout(0.25)(residual)
residual = Dense(256)(residual)
residual = Dropout(0.25)(residual)
residual = Activation("elu")(residual)
residual = Dense(1)(residual)
# Together
model = Add()([model, residual])
model = Model(inputs=[user_input, movie_input], outputs=model)
model.compile(
loss="mse", optimizer=SGD(learning_rate=0.1, momentum=0.5), metrics=["mse"]
)
return model
model = build_model(user_num, movie_num)
model.summary()
plot_model(model, to_file="model.png")
# # Train the model
history = model.fit(
x=[train_df.user_id.values, train_df.movie_id.values],
y=train_df.rating.values - MU,
epochs=EPOCHS,
batch_size=128,
validation_data=(
[test_df.user_id.values, test_df.movie_id.values],
test_df.rating.values - MU,
),
)
# # Check the results
def plot(history):
f, ax = plt.subplots(1, 2, figsize=(10, 4))
ax[0].plot(history.history["loss"], label="train loss")
ax[0].plot(history.history["val_loss"], label="validation loss")
ax[0].legend()
ax[1].plot(history.history["mse"], label="train mse")
ax[1].plot(history.history["val_mse"], label="validation mse")
ax[1].legend()
plt.suptitle("Train/Validation loss & mse.")
plt.show()
plot(history)
def predict_rating(data):
    result = model.predict([data.user_id.values, data.movie_id.values])
return result + MU
from sklearn.metrics import mean_squared_error
predicted = predict_rating(test_df).flatten()
mse = mean_squared_error(test_df.rating.values, predicted)
rmse = mean_squared_error(test_df.rating.values, predicted, squared=False)
print("Test MSE:", mse)
print("Test RMSE:", rmse)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup
import unicodedata
from nltk.tokenize.toktok import ToktokTokenizer
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.preprocessing.text import Tokenizer
from tqdm import tqdm
import codecs
import nltk
import matplotlib.pyplot as plt
# remove html tags
def remove_html_tags(text):
soup = BeautifulSoup(text, "html.parser")
[s.extract() for s in soup(["iframe", "script"])]
stripped_text = soup.get_text()
stripped_text = re.sub(r'["\|\n|\r|\n\r]+', "", stripped_text)
return stripped_text
# remove html:
def remove_html(text):
text = re.sub(r"https?:\/\/\S*", "", text, flags=re.MULTILINE)
return text
# removing accented characters
def remove_accented_chars(text):
text = (
unicodedata.normalize("NFKD", text)
.encode("ascii", "ignore")
.decode("utf-8", "ignore")
)
return text
# removing special characters:
def remove_special_characters(text):
text = re.sub(r"[^a-zA-z\s]", "", text)
return text
# removing stopwords
def remove_stopwords(text):
tokenizer = ToktokTokenizer()
stopword_list = [
"i",
"me",
"my",
"myself",
"we",
"our",
"ours",
"ourselves",
"you",
"your",
"yours",
"yourself",
"yourselves",
"he",
"him",
"his",
"himself",
"she",
"her",
"hers",
"herself",
"it",
"its",
"itself",
"they",
"them",
"their",
"theirs",
"themselves",
"what",
"which",
"who",
"whom",
"this",
"that",
"these",
"those",
"am",
"is",
"are",
"was",
"were",
"be",
"been",
"being",
"have",
"has",
"had",
"having",
"do",
"does",
"did",
"doing",
"a",
"an",
"the",
"and",
"but",
"if",
"or",
"because",
"as",
"until",
"while",
"of",
"at",
"by",
"for",
"with",
"about",
"against",
"between",
"into",
"through",
"during",
"before",
"after",
"above",
"below",
"to",
"from",
"up",
"down",
"in",
"out",
"on",
"off",
"over",
"under",
"again",
"further",
"then",
"once",
"here",
"there",
"when",
"where",
"why",
"how",
"all",
"any",
"both",
"each",
"few",
"more",
"most",
"other",
"some",
"such",
"no",
"nor",
"not",
"only",
"own",
"same",
"so",
"than",
"too",
"very",
"s",
"t",
"can",
"will",
"just",
"don",
"should",
"now",
]
tokens = tokenizer.tokenize(text)
tokens = [token.strip() for token in tokens]
filtered_tokens = [token for token in tokens if token not in stopword_list]
filtered_text = " ".join(filtered_tokens)
return filtered_text
def preprocessing_text(text_arr):
preprocessed_text = []
idx = 0
for text in text_arr:
text = remove_html(text)
text = remove_html_tags(text) # remove html tags
# text = remove_accented_chars(text) #removing accented characters
# text = remove_special_characters(text) #removing special characters
# text = text.lower() #change to lower case
# text = remove_stopwords(text) #removing stopwords
# word_seq,vocab_size = toakenizing(text)
# max_vocab_size = np.max(vocab_size)
preprocessed_text.append(text)
idx += 1
print("Data Preprocessing finished.")
return preprocessed_text
# data = '/kaggle/input/dir-processed/DIR_processed.csv'
data = "/kaggle/input/tweets-it/tweets-19-20.csv"
df = pd.read_csv(data, index_col=0, encoding="ISO 8859-1")
text = df.loc[:, "text"].values
text_cleaned = preprocessing_text(text)
len(text_cleaned)
df_text_cleaned = pd.DataFrame({"cleaned_text": text_cleaned})
words_text_cleaned = df_text_cleaned.cleaned_text.str.split().str.len()
words_text_cleaned.sum()
plt.figure(figsize=(10, 5))
(n, bins, patches) = plt.hist(
words_text_cleaned.values, edgecolor="black", bins=range(50), linewidth=1
)
# plt.ylim((0,5000))
plt.xlabel("Word Counts")
plt.show()
labels = df.iloc[:, 1:8].values
print(labels)
categories = df.columns.values[1:8]
print(categories)
import pandas as pd
import torch
import torch.nn as nn
from transformers import BertTokenizer
print("Loading BERT tokenizer...")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
# Print the original sentence.
print(" Original: ", text_cleaned[0])
# Print the sentence split into tokens.
print("Tokenized: ", tokenizer.tokenize(text_cleaned[0]))
# Print the sentence mapped to token ids.
print(
"Token IDs: ", tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text_cleaned[0]))
)
max_len = 0
# For every sentence...
for sent in text_cleaned:
# Tokenize the text and add `[CLS]` and `[SEP]` tokens.
input_ids = tokenizer.encode(sent, add_special_tokens=True)
# Update the maximum sentence length.
max_len = max(max_len, len(input_ids))
print("Max sentence length: ", max_len)
input_ids = []
attention_masks = []
# For every sentence...
for sent in text:
# `encode_plus` will:
# (1) Tokenize the sentence.
# (2) Prepend the `[CLS]` token to the start.
# (3) Append the `[SEP]` token to the end.
# (4) Map tokens to their IDs.
# (5) Pad or truncate the sentence to `max_length`
# (6) Create attention masks for [PAD] tokens.
encoded_dict = tokenizer.encode_plus(
sent, # Sentence to encode.
add_special_tokens=True, # Add '[CLS]' and '[SEP]'
max_length=64, # Pad & truncate all sentences.
pad_to_max_length=True,
return_attention_mask=True, # Construct attn. masks.
return_tensors="pt", # Return pytorch tensors.
truncation=True,
)
# Add the encoded sentence to the list.
input_ids.append(encoded_dict["input_ids"])
# And its attention mask (simply differentiates padding from non-padding).
attention_masks.append(encoded_dict["attention_mask"])
# Convert the lists into tensors.
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(labels)
# Print sentence 0, now as a list of IDs.
print("Original: ", text_cleaned[0])
print("Token IDs:", input_ids[0])
input_ids.shape
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
# The DataLoader needs to know our batch size for training, so we specify it
# here. For fine-tuning BERT on a specific task, the authors recommend a batch
# size of 16 or 32.
batch_size = 32
from torch.utils.data import TensorDataset, random_split
dataset = TensorDataset(input_ids, attention_masks, labels)
# For validation the order doesn't matter, so we'll just read them sequentially.
validation_dataloader = DataLoader(
dataset, # The validation samples.
sampler=SequentialSampler(dataset), # Pull out batches sequentially.
batch_size=batch_size, # Evaluate with this batch size.
)
import torch
import torch.nn as nn
from transformers import BertModel
class BertClassifier(nn.Module):
"""Bert Model for Classification Tasks."""
def __init__(self, freeze_bert=True):
"""
@param bert: a BertModel object
@param classifier: a torch.nn.Module classifier
@param freeze_bert (bool): Set `False` to fine-tune the BERT model
"""
super(BertClassifier, self).__init__()
# Specify hidden size of BERT, hidden size of our classifier, and number of labels
D_in, H, D_out = 768, 50, 7
# Instantiate BERT model
self.bert = BertModel.from_pretrained("/kaggle/input/importmodel")
# Instantiate an one-layer feed-forward classifier
self.classifier = nn.Sequential(
nn.Linear(D_in, H),
nn.ReLU(),
# nn.Dropout(0.5),
nn.Linear(H, D_out),
nn.Sigmoid(),
)
# Freeze the BERT model
if freeze_bert:
for param in self.bert.parameters():
param.requires_grad = False
def forward(self, input_ids, attention_mask):
"""
Feed input to BERT and the classifier to compute logits.
@param input_ids (torch.Tensor): an input tensor with shape (batch_size,
max_length)
@param attention_mask (torch.Tensor): a tensor that hold attention mask
information with shape (batch_size, max_length)
@return logits (torch.Tensor): an output tensor with shape (batch_size,
num_labels)
"""
# Feed input to BERT
outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
# Extract the last hidden state of the token `[CLS]` for classification task
last_hidden_state_cls = outputs[0][:, 0, :]
# Feed input to classifier to compute logits
logits = self.classifier(last_hidden_state_cls)
return logits
def evaluate(model, val_dataloader):
"""After the completion of each training epoch, measure the model's performance
on our validation set.
"""
# Put the model into the evaluation mode. The dropout layers are disabled during
# the test time.
model.eval()
# Tracking variables
val_precision = []
val_recall = []
val_f1 = []
val_loss = []
# For each batch in our validation set...
for batch in val_dataloader:
        # Unpack the batch (the tensors stay on the CPU here)
b_input_ids, b_attn_mask, b_labels = tuple(t for t in batch)
# Compute logits
with torch.no_grad():
logits = model(b_input_ids, b_attn_mask)
# Compute loss
loss = loss_fn(logits, b_labels.type(torch.float))
val_loss.append(loss.item())
# Get the predictions
logits_arr = logits.cpu().numpy()
logits_arr[logits_arr >= 0.5] = 1
logits_arr[logits_arr < 0.5] = 0
b_labels_arr = b_labels.cpu().numpy()
# Calculate the accuracy rate
precision_micro = metrics.precision_score(
y_true=b_labels_arr, y_pred=logits_arr, average="micro"
)
val_precision.append(precision_micro)
recall_micro = metrics.recall_score(
y_true=b_labels_arr, y_pred=logits_arr, average="micro"
)
val_recall.append(recall_micro)
f1_micro = metrics.f1_score(
y_true=b_labels_arr, y_pred=logits_arr, average="micro"
)
val_f1.append(f1_micro)
# Compute the average accuracy and loss over the validation set.
val_loss = np.mean(val_loss)
    val_precision = np.mean(val_precision)
    val_recall = np.mean(val_recall)
    val_f1 = np.mean(val_f1)
return val_loss, val_precision, val_recall, val_f1
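# Note: `evaluate` refers to a `loss_fn` that is not defined in this excerpt.
# Since the classifier above ends in a sigmoid over 7 labels, a plausible choice
# (an assumption, not necessarily the original author's) is binary cross-entropy:
loss_fn = nn.BCELoss()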
import matplotlib.pyplot as plt
def evaluate_roc(probs, y_true, title_category):
"""
- Print AUC and accuracy on the test set
- Plot ROC
@params probs (np.array): an array of predicted probabilities with shape (len(y_true), 2)
@params y_true (np.array): an array of the true values with shape (len(y_true),)
"""
preds = probs
fpr, tpr, threshold = metrics.roc_curve(y_true, preds)
roc_auc = metrics.auc(fpr, tpr)
print(f"AUC: {roc_auc:.4f}")
# Get accuracy over the test set
y_pred = np.where(preds >= 0.5, 1, 0)
accuracy = metrics.accuracy_score(y_true, y_pred)
print(f"Accuracy: {accuracy*100:.2f}%")
# Plot ROC AUC
plt.title("Receiver Operating Characteristic for " + title_category)
plt.plot(fpr, tpr, "b", label="AUC = %0.2f" % roc_auc)
plt.legend(loc="lower right")
plt.plot([0, 1], [0, 1], "r--")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.show()
import torch.nn.functional as F
def bert_predict(model, test_dataloader):
"""Perform a forward pass on the trained BERT model to predict probabilities
on the test set.
"""
# Put the model into the evaluation mode. The dropout layers are disabled during
# the test time.
model.eval()
all_logits = []
# For each batch in our test set...
for batch in test_dataloader:
        # Unpack the batch (the tensors stay on the CPU here)
b_input_ids, b_attn_mask = tuple(t for t in batch)[:2]
# Compute logits
with torch.no_grad():
logits = model(b_input_ids, b_attn_mask)
all_logits.append(logits.type(torch.float))
# Concatenate logits from each batch
all_logits = torch.cat(all_logits, dim=0)
    # The classifier head already applies a sigmoid, so these outputs are per-label probabilities
probs = all_logits.cpu().numpy()
return probs
from torchsummary import summary
bert_classifier = BertClassifier(freeze_bert=True)
from sklearn import metrics
def calculate_metrics(pred, target):
# pred = np.array(pred > threshold, dtype=float)
return {
"micro/precision": metrics.precision_score(
y_true=target, y_pred=pred, average="micro"
),
"micro/recall": metrics.recall_score(
y_true=target, y_pred=pred, average="micro"
),
"micro/f1": metrics.f1_score(y_true=target, y_pred=pred, average="micro"),
"macro/precision": metrics.precision_score(
y_true=target, y_pred=pred, average="macro"
),
"macro/recall": metrics.recall_score(
y_true=target, y_pred=pred, average="macro"
),
"macro/f1": metrics.f1_score(y_true=target, y_pred=pred, average="macro"),
"samples/precision": metrics.precision_score(
y_true=target, y_pred=pred, average="samples"
),
"samples/recall": metrics.recall_score(
y_true=target, y_pred=pred, average="samples"
),
"samples/f1": metrics.f1_score(y_true=target, y_pred=pred, average="samples"),
}
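# A minimal sketch with made-up arrays showing the expected multi-label input
# format: `pred` and `target` are binary indicator matrices of shape
# (n_samples, n_labels), exactly like the thresholded predictions used below.
example_target = np.array([[1, 0, 1], [0, 1, 0]])
example_pred = np.array([[1, 0, 1], [0, 1, 1]])
print(calculate_metrics(pred=example_pred, target=example_target))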
test_labels = np.array([])
for i, data in enumerate(validation_dataloader):
test_labels = np.append(test_labels, data[2].numpy())
print(test_labels.shape)
test_labels = test_labels.reshape(83, 7)
test_labels.shape
probs = bert_predict(bert_classifier, validation_dataloader)
# Get predictions from the probabilities
threshold = 0.5
preds = np.where(probs > threshold, 1, 0)
# Number of tweets predicted non-negative
# print("Labels of tweets predicted: ", preds)
results = calculate_metrics(pred=preds, target=test_labels.astype("int"))
print(f'Micro Precision: {results["micro/precision"]:.4f}')
print(f'Micro Recall: {results["micro/recall"]:.4f}')
print(f'Micro F1: {results["micro/f1"]:.4f}')
print(f'Macro Precision: {results["macro/precision"]:.4f}')
print(f'Macro Recall: {results["macro/recall"]:.4f}')
print(f'Macro F1: {results["macro/f1"]:.4f}')
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.io
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import sys
import timeit
from sklearn.metrics import confusion_matrix
import math
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Loading CT Scans
data = scipy.io.loadmat(
"/kaggle/input/ctscan-segmentation-reconstruction-dataset/ctscan_hw1.mat"
)
temp_ct_scans = data["ctscan"]
ct_scans = []
for i in range(temp_ct_scans.shape[2]):
ct_scans.append(temp_ct_scans[:, :, i])
ct_scans = np.array(ct_scans)
print(ct_scans.shape)
# Loading Infection Masks
data = scipy.io.loadmat(
"/kaggle/input/ctscan-segmentation-reconstruction-dataset/infmsk_hw1.mat"
)
infmask = data["infmsk"]
infection_masks = []
for i in range(infmask.shape[2]):
infection_masks.append(infmask[:, :, i])
infection_masks = np.array(infection_masks)
print(infection_masks.shape)
N = ct_scans.shape[0]
# Manual Centroid Initialization; DOESN'T WORK
# def find_centroids(temp_ct_scan, temp_inf_mask):
# b = None
# i = None
# h = None
# for i in range(512):
# for j in range(512):
# # if(not (b == None or i == None or h == None) ):
# # break
# if(temp_inf_mask[i][j] == 0):
# b = temp_ct_scan[i][j]
# if(temp_inf_mask[i][j] == 1):
# i = temp_ct_scan[i][j]
# else:
# h = temp_ct_scan[i][j]
# return np.array([b,h,i]).reshape(3,1)
# Functions to relabel the predicted mask so that background, infection and healthy regions get the expected labels (0, 1, 2)
def find_counts(mask):
count_background = np.count_nonzero(mask == 0)
count_infection = np.count_nonzero(mask == 1)
count_healthy = np.count_nonzero(mask == 2)
return np.array([count_background, count_infection, count_healthy])
def check_pred_mask(pred_mask):
pred_count_list = find_counts(pred_mask)
    original_count_list = np.array([100, 10, 50])  # rank proxies: background has the most pixels, infection the fewest, healthy in between
mapping = dict({})
for i in range(3):
pred_max_idx = np.argmax(pred_count_list)
original_max_idx = np.argmax(original_count_list)
mapping[pred_max_idx] = original_max_idx
pred_count_list[pred_max_idx] = -1
original_count_list[original_max_idx] = -1
corrected_mask = np.empty(shape=(512, 512), dtype=int)
for i in range(512):
for j in range(512):
corrected_mask[i][j] = mapping[pred_mask[i][j]]
return corrected_mask
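# A small sanity-check sketch (synthetic mask, not real data): build a 512x512
# mask whose most frequent cluster label is 2 and rarest is 1, and confirm that
# check_pred_mask relabels them to the expected convention
# (0 = background, 1 = infection, 2 = healthy).
toy_mask = np.zeros((512, 512), dtype=int)
toy_mask[:, :] = 2  # most frequent cluster -> should become background (0)
toy_mask[:100, :100] = 0  # mid-sized cluster -> should become healthy (2)
toy_mask[:10, :10] = 1  # rarest cluster -> should stay infection (1)
print(find_counts(check_pred_mask(toy_mask)))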
# Using k-means for Image Segmentation
def get_predicted_mask(ct_scans):
start = timeit.default_timer()
pred_masks = []
N = len(ct_scans)
for i in range(N):
sys.stdout.write("\r" + "Processing Image " + str(i))
sample = ct_scans[i]
kmeans_obj = KMeans(n_clusters=3, random_state=0)
ct_scan_flattened = sample.flatten().reshape((512 * 512, 1))
clusters = kmeans_obj.fit_predict(ct_scan_flattened)
curr_pred_mask = clusters.reshape((512, 512))
curr_pred_mask = check_pred_mask(curr_pred_mask)
pred_masks.append(curr_pred_mask)
pred_masks = np.array(pred_masks)
print("\n", pred_masks.shape)
stop = timeit.default_timer()
print("Time Taken = ", stop - start)
return pred_masks
pred_masks = get_predicted_mask(ct_scans)
# Checking the Predicted Mask with the expert annotations
# for i in range(N):
# f, axarr = plt.subplots(1,3)
# axarr[0].set_title("Expert Annotations")
# axarr[0].imshow(infection_masks[i])
# axarr[1].set_title("Predicted Masks")
# axarr[1].imshow(pred_masks[i])
# axarr[2].set_title("CT Scans")
# axarr[2].imshow(ct_scans[i])
# plt.show()
# Two Samples with Expert Annotations(left), Predicted Mask(middle) and CT Scans(right)
plt.rcParams["figure.figsize"] = (12, 12)
i = 99
f, axarr = plt.subplots(1, 3)
axarr[0].set_title("Expert Annotations")
axarr[0].imshow(infection_masks[i], cmap="gray")
axarr[1].set_title("Predicted Masks")
axarr[1].imshow(pred_masks[i], cmap="gray")
axarr[2].set_title("CT Scans")
axarr[2].imshow(ct_scans[i], cmap="gray")
f.tight_layout()
plt.show()
i = 69
f, axarr = plt.subplots(1, 3)
axarr[0].set_title("Expert Annotations")
axarr[0].imshow(infection_masks[i], cmap="gray")
axarr[1].set_title("Predicted Masks")
axarr[1].imshow(pred_masks[i], cmap="gray")
axarr[2].set_title("CT Scans")
axarr[2].imshow(ct_scans[i], cmap="gray")
f.tight_layout()
plt.show()
# # Pixel Accuracy(Not Required), Not DICE_SCORE
# pixel_accuracy = []
# for i in range(len(pred_masks)):
# pred_mask = pred_masks[i]
# original_mask = infection_masks[i]
# pixel_accuracy.append((pred_mask == original_mask).sum()/ (512*512))
# pixel_accuracy = np.array(pixel_accuracy)
# print("Average Pixel Accuracy = ", np.mean(pixel_accuracy))
# Evaluating the model performance using several evaluation metrics
def get_confusion_metric(true_y, pred_y):
true_y = true_y.flatten()
pred_y = pred_y.flatten()
return confusion_matrix(true_y, pred_y, labels=[0, 1, 2])
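# A tiny sketch with made-up labels to make the orientation explicit: sklearn's
# confusion_matrix puts true labels on the rows and predicted labels on the
# columns, which is why the matrix is transposed before being indexed as
# [predicted][true] in the function below.
example_true = np.array([0, 1, 2, 2])
example_pred = np.array([0, 2, 2, 1])
print(get_confusion_metric(example_true, example_pred))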
def get_req_avg_eval_metrics(infection_masks, pred_masks):
avg_infection_sensitivity = 0
avg_infection_specificity = 0
avg_infection_accuracy = 0
avg_infection_dice_score = 0
avg_healthy_sensitivity = 0
avg_healthy_specificity = 0
avg_healthy_accuracy = 0
avg_healthy_dice_score = 0
count_infection_sensitivity = 0 # nan error
N = len(infection_masks)
for i in range(N):
curr_confusion_metric = (
get_confusion_metric(infection_masks[i], pred_masks[i])
).T
infection_TP = curr_confusion_metric[1][1]
infection_TN = (
curr_confusion_metric[0][0]
+ curr_confusion_metric[2][0]
+ curr_confusion_metric[0][2]
+ curr_confusion_metric[2][2]
)
infection_FP = curr_confusion_metric[1][0] + curr_confusion_metric[1][2]
infection_FN = curr_confusion_metric[0][1] + curr_confusion_metric[2][1]
healthy_TP = curr_confusion_metric[2][2]
healthy_TN = (
curr_confusion_metric[0][0]
+ curr_confusion_metric[0][1]
+ curr_confusion_metric[1][0]
+ curr_confusion_metric[1][1]
)
healthy_FP = curr_confusion_metric[2][0] + curr_confusion_metric[2][1]
healthy_FN = curr_confusion_metric[0][2] + curr_confusion_metric[1][2]
        # Sensitivity = Recall = TP / (TP + FN)
        # Precision = TP / (TP + FP)
        # Specificity = TN / (TN + FP)
        # Dice Score = 2*TP / (2*TP + FP + FN)
infection_sensitivity = 0
if (infection_TP + infection_FN) != 0:
count_infection_sensitivity += 1
infection_sensitivity = (infection_TP) / (infection_TP + infection_FN)
infection_specificity = (infection_TN) / (infection_TN + infection_FP)
infection_accuracy = (infection_TP + infection_TN) / (
infection_TP + infection_TN + infection_FP + infection_FN
)
infection_dice_score = (2 * infection_TP) / (
2 * infection_TP + infection_FP + infection_FN
)
healthy_sensitivity = (healthy_TP) / (healthy_TP + healthy_FN)
healthy_specificity = (healthy_TN) / (healthy_TN + healthy_FP)
healthy_accuracy = (healthy_TP + healthy_TN) / (
healthy_TP + healthy_TN + healthy_FP + healthy_FN
)
healthy_dice_score = (2 * healthy_TP) / (
2 * healthy_TP + healthy_FP + healthy_FN
)
avg_infection_sensitivity += infection_sensitivity
avg_infection_specificity += infection_specificity
avg_infection_accuracy += infection_accuracy
avg_infection_dice_score += infection_dice_score
avg_healthy_sensitivity += healthy_sensitivity
avg_healthy_specificity += healthy_specificity
avg_healthy_accuracy += healthy_accuracy
avg_healthy_dice_score += healthy_dice_score
avg_infection_sensitivity = avg_infection_sensitivity / count_infection_sensitivity
avg_infection_specificity = avg_infection_specificity / N
avg_infection_accuracy = avg_infection_accuracy / N
avg_infection_dice_score = avg_infection_dice_score / N
avg_healthy_sensitivity = avg_healthy_sensitivity / N
avg_healthy_specificity = avg_healthy_specificity / N
avg_healthy_accuracy = avg_healthy_accuracy / N
avg_healthy_dice_score = avg_healthy_dice_score / N
return (
avg_infection_dice_score,
avg_infection_sensitivity,
avg_infection_specificity,
avg_infection_accuracy,
avg_healthy_dice_score,
avg_healthy_sensitivity,
avg_healthy_specificity,
avg_healthy_accuracy,
)
(
inf_ds,
inf_sen,
inf_spec,
inf_acc,
hea_ds,
hea_sen,
hea_spec,
hea_acc,
) = get_req_avg_eval_metrics(infection_masks, pred_masks)
print("Average Dice Score for Infection: ", inf_ds)
print("Average Sensitivity for Infection: ", inf_sen)
print("Average Specificity for Infection: ", inf_spec)
print("Average Accuracy for Infection: ", inf_acc)
print()
print("Average Dice Score for Healthy: ", hea_ds)
print("Average Sensitivity for Healthy: ", hea_sen)
print("Average Specificity for Healthy: ", hea_spec)
print("Average Accuracy for Healthy: ", hea_acc)
|
# Using Naive Bayes to classify income in the Adult dataset
# for data manipulation
import numpy as np
import pandas as pd
# import category encoders
import category_encoders as ce
# pip install category_encoders
# for feature scaling
from sklearn.preprocessing import RobustScaler
# for model making
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
# for model accuracy
from sklearn.metrics import accuracy_score
# for data visualization
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# #### Loading the dataset
df = pd.read_csv("/kaggle/input/adult-dataset/adult.csv")
print(df.shape)
df.head()
# #### Since the dataset has no header row, we provide meaningful feature names for the columns
col_names = [
"age",
"workclass",
"fnlwgt",
"education",
"education_num",
"marital_status",
"occupation",
"relationship",
"race",
"sex",
"capital_gain",
"capital_loss",
"hours_per_week",
"native_country",
"income",
]
df.columns = col_names
df.columns
df.head()
# #### Summary and Descriptive statistics of the dataset and checking for null values
df.info()
df.describe()
df.isnull().sum()
# #### Finding out categorical variables
categorical = [var for var in df.columns if df[var].dtype == "O"]
print("There are {} categorical variables\n".format(len(categorical)))
print("The categorical variables are :\n\n", categorical)
# #### Checking for inconsistency in categorical variables
df.workclass.value_counts()
# replacing '?' values in workclass variable with 'NaN'
df["workclass"].replace("?", np.NaN, inplace=True)
df.occupation.value_counts()
# replacing '?' values in occupation variable with 'NaN'
df["occupation"].replace("?", np.NaN, inplace=True)
df.native_country.value_counts()
# replacing '?' values in native_country variable with 'NaN'
df["native_country"].replace("?", np.NaN, inplace=True)
# #### Separating the target variable and features
X = df.drop(["income"], axis=1)
y = df["income"]
# #### Splitting the data into training and testing datasets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# #### One hot encoding the categorical variables
encoder = ce.OneHotEncoder(
cols=[
"workclass",
"education",
"marital_status",
"occupation",
"relationship",
"race",
"sex",
"native_country",
]
)
X_train = encoder.fit_transform(X_train)
X_test = encoder.transform(X_test)
print(X_train.shape)
X_train.head()
# #### Feature Scaling
X_train
cols = X_train.columns
scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=[cols])
X_test = pd.DataFrame(X_test, columns=[cols])
X_train.head()
# #### Model making
# instantiating model
model = GaussianNB()
model.fit(X_train, y_train)
# #### Predicting values
y_pred = model.predict(X_test)
y_pred_train = model.predict(X_train)
# #### Checking Model accuracy
print("Model accuracy score (testing dataset) : ", accuracy_score(y_test, y_pred))
print(
"Model accuracy score (training dataset) : ", accuracy_score(y_train, y_pred_train)
)
# ***Since the accuracy scores for the training and testing datasets are comparable, there is no sign of overfitting***
y_pred_prob = model.predict_proba(X_test)
y_pred_prob
y_pred_prob_df = pd.DataFrame(data=y_pred_prob, columns=["P(<=50K)", "P(>50K)"])
y_pred_prob_df
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
pd.set_option("max_rows", None)
import regex as re
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import LinearSVR, SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
import warnings
warnings.filterwarnings(action="ignore")
data = pd.read_csv(
"/kaggle/input/smartphone-specifications-and-prices-in-india/smartphones - smartphones.csv"
)
data.head()
def onehot_encode(df, column):
df = df.copy()
dummies = pd.get_dummies(df[column], prefix=column)
if len(df[column].unique()) == 2:
dummies = dummies.drop(dummies.columns[0], axis=1)
df = pd.concat([df, dummies], axis=1)
df = df.drop(column, axis=1)
return df
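# A minimal usage sketch on a made-up frame: every category becomes its own
# prefixed dummy column, and for binary columns the first dummy is dropped to
# avoid redundancy.
demo_df = pd.DataFrame({"os": ["Android", "iOS", "Other"], "price": [10, 20, 30]})
print(onehot_encode(demo_df, "os"))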
def preprocess_inputs(df):
df = df.copy()
rows = df.loc[~(df.battery.str.split(" ").str[0]).str.contains("mAh")].index
df = df.drop(rows, axis=0)
### battery operation
df["batteryFastCharging"] = df.apply(
lambda x: 1 if "Fast Charging" in x["battery"] else 0, axis=1
)
df["batteryFastChargingW"] = (
df.battery.str.split("Battery with")
.str[1]
.str.replace("Fast Charging", "")
.str.replace("W", "")
)
df["batteryFastChargingW"] = df.apply(
lambda x: np.nan
if x["batteryFastCharging"] == 0
else ("45" if x["batteryFastChargingW"] == " " else x["batteryFastChargingW"]),
axis=1,
)
df["batteryFastChargingW"] = pd.to_numeric(
df["batteryFastChargingW"], errors="ignore"
)
df["batterymAh"] = (
df.battery.str.split(" ").str[0].str.replace("mAh", "").astype("int")
)
### Price operation
df["price"] = df.price.str.replace("₹", "").str.replace(",", "").astype("int")
### display operation
df["displaySizeInches"] = (
df.display.str.split(",").str[0].str.replace("inches", "").astype("float")
)
df["displayWidth"] = df["display"].str.extract(r"(\d+)\s*x").astype("float")
df["displayHeight"] = df["display"].str.extract(r"x\s*(\d+)").astype("float")
df["display1"] = df.display.str.split(",").str[1]
df.display1 = df.display1.str.replace("px", "")
df.display1 = df.display1.str.replace("Display", "")
df.display1 = df.display1.str.replace("with", "")
df.display1 = df.display1.str.replace(" ", "")
df["displayNotch"] = df["display1"].str.extract(
r"(\D+Notch)?$", flags=re.IGNORECASE
)
df["displayHz"] = df["display"].str.extract(r"(\d+)\s*Hz").astype("float")
### memory card operation
df["cardUpto"] = df["card"].str.extract(r"upto\s*(\d+)")
df["cardUpto"] = df["cardUpto"].str.replace(r"\b1\b", "1024")
df["cardUpto"] = df["cardUpto"].str.replace("1000", "1024")
df["cardUpto"] = df["cardUpto"].str.replace(r"\b2\b", "2048")
df["cardUpto"] = df["cardUpto"].astype("float")
### Sim operation
df["5G"] = df.apply(lambda x: 1 if "5G" in x["sim"] else 0, axis=1)
## Ram/Storage operation
df["Ram"] = (
(df.ram.str.split(",").apply(lambda x: x[0] if len(x) == 2 else np.NaN))
.str.extract(r"(\d+)\s*GB")
.astype("float")
)
df["Storage"] = (
(df.ram.str.split(",").apply(lambda x: x[1] if len(x) == 2 else x[0]))
.str.extract(r"(\d+)\s*GB")
.astype("float")
)
## Processor operation
df["processorClockSpeed"] = (
df["processor"].str.extract(r"(\d+\.?\d*)\s*GHz").astype("float")
)
df["processorNoOfCores"] = df["processor"].str.extract(r"(\w+)\sCore")
df["processorName"] = df.processor.str.split(",").str[0]
# ['Octa', 'Hexa', 'Quad', nan, 'Dual', 'Single']
df["processorNoOfCores"] = df["processorNoOfCores"].str.replace("Single", "1")
df["processorNoOfCores"] = df["processorNoOfCores"].str.replace("Dual", "2")
df["processorNoOfCores"] = df["processorNoOfCores"].str.replace("Quad", "4")
df["processorNoOfCores"] = df["processorNoOfCores"].str.replace("Hexa", "6")
df["processorNoOfCores"] = df["processorNoOfCores"].str.replace("Octa", "8")
mode_value = df["processorNoOfCores"].mode()[0] # calculate mode
df["processorNoOfCores"] = df["processorNoOfCores"].fillna(mode_value).astype(int)
df["processorNoOfCores"] = df["processorNoOfCores"].astype("int")
## Camera operation
df["camera"] = (
df["camera"]
.str.extractall(r"(\d+\.?\d*)\s*MP")[0]
.astype(float)
.groupby(level=0)
.apply(list)
.apply(sum)
)
for column in ["os", "displayNotch", "processorName"]:
df = onehot_encode(df, column=column)
df.cardUpto = df.cardUpto.fillna(df.cardUpto.mode()[0])
df.displayHz = df.displayHz.fillna(df.displayHz.mode()[0])
df.batteryFastChargingW = df.batteryFastChargingW.fillna(
df.batteryFastChargingW.mode()[0]
)
df.rating = df.rating.fillna(df.rating.mode()[0])
df.processorClockSpeed = df.processorClockSpeed.fillna(
df.processorClockSpeed.mode()[0]
)
df.camera = df.camera.fillna(df.camera.mode()[0])
df.Storage = df.Storage.fillna(df.Storage.mode()[0])
df.Ram = df.Ram.fillna(df.Ram.mode()[0])
drop_cols = [
"batteryFastCharging",
"battery",
"display1",
"display",
"card",
"sim",
"ram",
"processor",
"model",
]
df = df.drop(drop_cols, axis=1)
return df
# X.info()
XX = preprocess_inputs(data)
# X.Ram.str.split(' ')
# X['Ram'].str.extract(r'(\d+)\s*GB')
XX.head()
X = XX.drop("price", axis=1)
y = XX["price"]
# y
# X.isnull().sum().sort_values(ascending= False).head(8)
# X.cardUpto = X.cardUpto.fillna(X.cardUpto.mode()[0])
# X.displayHz = X.displayHz(X.displayHz.mode()[0])
# X.batteryFastChargingW = X.batteryFastChargingW(X.batteryFastChargingW.mode()[0])
# X.rating = X.rating(X.rating.mode()[0])
# X.processorClockSpeed = X.processorClockSpeed(X.processorClockSpeed.mode()[0])
# X.camera = X.camera(X.camera.mode()[0])
# X.Storage = X.Storage(X.Storage.mode()[0])
# X.Ram = X.Ram(X.Ram.mode()[0])
# X.batteryFastChargingW.mode()
# X.rating.mode()
# X.processorClockSpeed.mode()
# X.camera.mean()
# X.Storage.mode()
# X.Ram.mode()
# # X.shape
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
############### ModeL ####################
models = {
" Linear Regression": LinearRegression(),
" Linear Regression (L2 Regularization)": Ridge(),
" Linear Regression (L1 Regularization)": Lasso(),
" K-Nearest Neighbors": KNeighborsRegressor(),
" Neural Network": MLPRegressor(),
"Support Vector Machine (Linear Kernel)": LinearSVR(),
" Support Vector Machine (RBF Kernel)": SVR(),
" Decision Tree": DecisionTreeRegressor(),
" Random Forest": RandomForestRegressor(),
" Gradient Boosting": GradientBoostingRegressor(),
" XGBoost": XGBRegressor(),
# " LightGBM": LGBMRegressor(),
" CatBoost": CatBoostRegressor(verbose=0),
}
for name, model in models.items():
model.fit(X_train, y_train)
print(name + " trained.")
print("\nAll Model Trained")
################### RMSE & R^2 ###################
for name, model in models.items():
y_pred = model.predict(X_test)
rmse = np.sqrt(np.mean((y_test - y_pred) ** 2))
print(name + " RMSE: {:.4f}".format(rmse))
for name, model in models.items():
y_pred = model.predict(X_test)
r2 = 1 - (np.sum((y_test - y_pred) ** 2) / np.sum((y_test - y_test.mean()) ** 2))
print(name + " R^2: {:.5f}".format(r2))
############### Df #########################
modell = []
rmsel = []
r2l = []
for name, model in models.items():
y_pred = model.predict(X_test)
rmse = np.sqrt(np.mean((y_test - y_pred) ** 2))
# print(name + " RMSE: {:.4f}".format(rmse))
r2 = 1 - (np.sum((y_test - y_pred) ** 2) / np.sum((y_test - y_test.mean()) ** 2))
# print(name + " R^2: {:.5f}".format(r2))
modell.append(name)
rmsel.append(rmse)
r2l.append(r2)
df = pd.DataFrame({"ModelName": modell, "RMSE": rmsel, "R_squared": r2l})
df = df.sort_values(by=["RMSE"], ascending=True).reset_index(drop=True)
df
# XGB rocks!!
# This is the best Dataset I have worked on!
# Took every bit of my learning.
# Any suggestions are most welcome.
# Upvote if you like!
# Thanks!
|
import pandas as pd
import numpy as np
import seaborn as sns
df = pd.read_csv("../input/world-happiness/2019.csv")
df.head()
# Data preprocessing
df.shape
df.columns
df.isnull().sum()
sns.barplot(x="Country or region", y="Overall rank", data=df)
sns.pairplot(df)
tc = df.corr()
tc
sns.heatmap(tc, annot=True)
l = df["Country or region"]
d = df.drop(["Country or region"], axis=1)
print(l.shape)
print(d.shape)
from sklearn.preprocessing import StandardScaler
standardised_data = StandardScaler().fit_transform(d)
print(standardised_data.shape)
labels = l
data = d
from sklearn import decomposition
pca = decomposition.PCA()
sample_data = standardised_data
pca.n_components = 2
pca_data = pca.fit_transform(sample_data)
print(pca_data.shape)
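# Optional check (sketch): how much of the variance in the standardised data the
# two retained principal components actually explain.
print("Explained variance ratio:", pca.explained_variance_ratio_)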
pca_data = np.vstack((pca_data.T, labels)).T
pca_data
pca_df = pd.DataFrame(
data=pca_data, columns=("1st_principal", "2nd_principal", "labels")
)
import matplotlib.pyplot as plt
sns.FacetGrid(pca_df, hue="labels", height=6).map(
plt.scatter, "1st_principal", "2nd_principal"
).add_legend()
plt.show()
|
# #Scheherazade
# Scheherazade is a major female character and the storyteller in the frame narrative of the Middle Eastern collection of tales known as the One Thousand and One Nights.
# https://en.wikipedia.org/wiki/Scheherazade
# myCast.io
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
#
# #Rimsky Korsakov - Scheherazade (Şehrazat)
# https://www.youtube.com/watch?v=J0pic78Qkrs
# The world of the 1001 Nights tales could only be told this well through music. One of my favorite pieces of music.
# Importing essential libraries
import cv2, glob, os
from matplotlib import pyplot as plt
from PIL import Image
# #Codes by Shrushrita Sharma https://www.kaggle.com/shrushritasharma/understanding-basic-image-operations/comments
# Importing essential datasets
BASE = "../input/cusersmarildownloadsscheherazadepng/"
# Reading and understanding a single image
img = cv2.imread("../input/cusersmarildownloadsscheherazadepng/scheherazade.png")
plt.figure(figsize=(10, 10))
plt.imshow(img, cmap="twilight"), plt.axis("off"), plt.title(
"Scheherazade", fontsize=20
), plt.show()
# Visualizing the image histogram for first image
counts, bins, _ = plt.hist(
img.ravel(),
density=False,
alpha=0.8,
histtype="stepfilled",
color="#0303FF",
edgecolor="#44FF80",
)
# Understanding multivariate normal for the first image
x, y = np.random.multivariate_normal([0, 200], [[1, 0], [0, 200]], 10000).T
plt.hist2d(x, y, bins=30, cmap="Blues")
cb = plt.colorbar()
cb.set_label("Counts in Bin")
plt.show()
# Grayscale histogram
plt.figure(figsize=(15, 8))
plt.subplot(241), plt.plot(
cv2.calcHist([cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)], [0], None, [256], [0, 256]),
color="k",
), plt.title("Scheherazade", fontsize=15)
plt.subplot(242), plt.plot(
cv2.calcHist([img], [0], None, [256], [0, 256]), color="b"
), plt.xlim([0, 256])
plt.subplot(243), plt.plot(
cv2.calcHist([img], [0], None, [256], [0, 256]), color="g"
), plt.xlim([0, 256])
plt.subplot(244), plt.plot(
cv2.calcHist([img], [0], None, [256], [0, 256]), color="r"
), plt.xlim([0, 256])
plt.show()
# Grayscale Histogram Equalization
plt.figure(figsize=(20, 10))
plt.subplot(121), plt.imshow(
cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), cmap="gray"
), plt.axis("off"), plt.title("Scheherazade", fontsize=20)
plt.subplot(122), plt.imshow(
cv2.equalizeHist(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)), cmap="gray"
), plt.axis("off"), plt.title("Equalized Histogram", fontsize=20)
plt.show()
# 3-channel Histogram Equalization
channels = cv2.split(img)
eq_channels = []
for ch, color in zip(channels, ["B", "G", "R"]):
eq_channels.append(cv2.equalizeHist(ch))
plt.figure(figsize=(20, 10))
plt.subplot(121), plt.imshow(img, cmap="gray"), plt.axis("off"), plt.title(
"Scheherazade", fontsize=20
)
plt.subplot(122), plt.imshow(
cv2.cvtColor(cv2.merge(eq_channels), cv2.COLOR_BGR2RGB), cmap="gray"
), plt.axis("off"), plt.title("Equalized Histogram", fontsize=20)
plt.show()
# Averaging the images
plt.figure(figsize=(20, 10))
plt.subplot(121), plt.imshow(cv2.blur(img, (40, 40)), cmap="hsv"), plt.axis(
"off"
), plt.title("Scheherazade", fontsize=30)
plt.show()
# Gaussian filtering the images
plt.figure(figsize=(20, 10))
plt.subplot(121), plt.imshow(cv2.GaussianBlur(img, (5, 5), 0), cmap="hsv"), plt.axis(
"off"
), plt.title("Scheherazade", fontsize=30)
plt.show()
# Median filtering the images
plt.figure(figsize=(20, 10))
plt.subplot(121), plt.imshow(cv2.medianBlur(img, 5), cmap="Purples"), plt.axis(
"off"
), plt.title("Scheherazade", fontsize=30)
plt.show()
# Making borders for the images
plt.figure(figsize=(20, 10))
plt.subplot(231), plt.imshow(img, cmap="gray"), plt.axis("off"), plt.title(
"Grey", fontsize=25
)
plt.subplot(232), plt.imshow(
cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_REPLICATE), cmap="Blues"
), plt.axis("off"), plt.title("Replicate", fontsize=25)
plt.subplot(233), plt.imshow(
cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_REFLECT), cmap="gray"
), plt.axis("off"), plt.title("Reflect", fontsize=25)
plt.subplot(234), plt.imshow(
cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_REFLECT_101), cmap="Blues"
), plt.axis("off"), plt.title("Reflect 101", fontsize=25)
plt.subplot(235), plt.imshow(
cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_WRAP), cmap="gray"
), plt.axis("off"), plt.title("Wrap", fontsize=25)
plt.subplot(236), plt.imshow(
cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=(120, 80, 250)),
cmap="Blues",
), plt.axis("off"), plt.title("Constant", fontsize=25)
plt.subplots_adjust(wspace=0.05, hspace=-0.3)
plt.show()
# Mask operations for the images
kernel = cv2.getGaussianKernel(15, 2.0)
kernel_2D = kernel @ kernel.transpose()
blurred_img = cv2.filter2D(img, -1, kernel_2D)
plt.imshow(blurred_img, cmap="Blues"), plt.axis("off"), plt.title(
"Gaussian masking", fontsize=20
), plt.show()
# Masking images
plt.imshow((img * 0.2).astype(np.uint8), cmap="Blues"), plt.axis("off"), plt.title(
"Masking images", fontsize=20
), plt.show()
# Uniform addition of pixel values to images
imgx = (img * 0.5 + (96, 128, 160)).clip(0, 255)
plt.imshow(imgx.astype(np.uint8), cmap="Blues"), plt.axis("off"), plt.title(
"Uniform addition", fontsize=20
), plt.show()
# Mask creation by drawing in image
mask_01 = np.zeros_like(img[0:300, 0:400])
cv2.rectangle(mask_01, (50, 50), (100, 200), (255, 255, 255), thickness=-1)
cv2.circle(mask_01, (200, 100), 50, (255, 255, 255), thickness=-1)
cv2.fillConvexPoly(
mask_01, np.array([[330, 50], [300, 200], [360, 150]]), (255, 255, 255)
)
mask_01x = cv2.resize(mask_01, img.shape[1::-1])
plt.imshow(mask_01), plt.axis("off"), plt.title("Sample Mask", fontsize=20), plt.show()
# Bitwise and with the mask created
plt.figure(figsize=(10, 10))
plt.imshow(cv2.bitwise_and(img, mask_01x)), plt.axis("off"), plt.title(
"Bitwise masking", fontsize=20
), plt.show()
# #Seems like we're peeking through a hole to observe Scheherazade.
# Reading a new image for working with image channels
img = cv2.imread(BASE + "scheherazade.png")
print(img.shape)
# Splitting the channels
plt.figure(figsize=(15, 15))
b, g, r = cv2.split(img)
mask_03 = np.zeros(img.shape[:2], dtype="uint8")
imgx = cv2.merge((mask_03, g, r))
plt.subplot(221), plt.imshow(img[:, :, 0], cmap="gray"), plt.axis("off"), plt.title(
"Red Channel", fontsize=20
)
plt.subplot(222), plt.imshow(img[:, :, 1], cmap="gray"), plt.axis("off"), plt.title(
"Green Channel", fontsize=20
)
plt.subplot(223), plt.imshow(img[:, :, 2], cmap="gray"), plt.axis("off"), plt.title(
"Blue Channel", fontsize=20
)
plt.subplot(224), plt.imshow(imgx), plt.axis("off"), plt.title(
"Channels Merged", fontsize=20
)
plt.subplots_adjust(wspace=0, hspace=-0.25)
plt.show()
# Crop and Resize Images
height, width = img.shape[:2]
quarter_height, quarter_width = height / 4, width / 4
T = np.float32([[1, 0, quarter_width], [0, 1, quarter_height]])
plt.figure(figsize=(20, 15))
plt.subplot(231), plt.imshow(img), plt.axis("off"), plt.title(
"Original Image", fontsize=20
)
plt.subplot(232), plt.imshow(cv2.resize(img, (200, 200))), plt.axis("off"), plt.title(
"Resized Image", fontsize=20
)
plt.subplot(234), plt.imshow(cv2.warpAffine(img, T, (width, height))), plt.axis(
"off"
), plt.title("Translated Image", fontsize=20)
plt.subplot(235), plt.imshow(cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)), plt.axis(
"off"
), plt.title("Rotated Image", fontsize=20)
plt.subplot(236), plt.imshow(np.flip(img, (0, 1))), plt.axis("off"), plt.title(
"Flipped Image", fontsize=20
)
# Code by Olga Belitskaya https://www.kaggle.com/olgabelitskaya/sequential-data/comments
from IPython.display import display, HTML
c1, c2, f1, f2, fs1, fs2 = "#eb3434", "#eb3446", "Akronim", "Smokum", 30, 15
def dhtml(string, fontcolor=c1, font=f1, fontsize=fs1):
display(
HTML(
"""<style>
@import 'https://fonts.googleapis.com/css?family="""
+ font
+ """&effect=3d-float';</style>
<h1 class='font-effect-3d-float' style='font-family:"""
+ font
+ """; color:"""
+ fontcolor
+ """; font-size:"""
+ str(fontsize)
+ """px;'>%s</h1>""" % string
)
)
dhtml("Be patient. Marília Prata, @mpwolke was Here.")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import LocalOutlierFactor
from sklearn.model_selection import cross_val_score, train_test_split, KFold
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.ensemble import (
GradientBoostingRegressor,
RandomForestRegressor,
AdaBoostRegressor,
ExtraTreesRegressor,
)
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
from sklearn.linear_model import (
Ridge,
RidgeCV,
BayesianRidge,
LinearRegression,
Lasso,
LassoCV,
ElasticNet,
RANSACRegressor,
HuberRegressor,
PassiveAggressiveRegressor,
ElasticNetCV,
)
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import VotingRegressor
from sklearn.svm import SVR
from sklearn.kernel_ridge import KernelRidge
# ## EDA
# loading data
df_train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
df_train.drop("Id", axis=1, inplace=True)
df_test = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
# Plotting the 12 features most strongly correlated with SalePrice
plt.figure(figsize=(15, 6))
keys = (
df_train.select_dtypes(np.number)
.corr()["SalePrice"]
.abs()
.sort_values(ascending=False)[1:13]
.index
)
for i, key in enumerate(keys):
plt.subplot(3, 4, i + 1)
plt.scatter(df_train[key], df_train["SalePrice"])
plt.title(key)
plt.tight_layout()
plt.show()
# Observe the distribution
plt.figure(figsize=(12, 6))
plt.hist(df_train["SalePrice"], bins=50)
plt.xlabel("SalePrice")
plt.ylabel("count")
plt.show()
# Observe the distribution log(SalePrice + 1)
plt.figure(figsize=(12, 6))
plt.hist(np.log1p(df_train["SalePrice"]), bins=50)
plt.xlabel("log(SalePrice + 1)")
plt.ylabel("count")
plt.show()
# ## Data pre-processing
# Data type checking
cols = ["MSSubClass"]
for key in cols:
    df_train[key] = df_train[key].astype(str)
    df_test[key] = df_test[key].astype(str)
# missing value imputation
nan_number = df_train.isnull().sum()
display(nan_number[nan_number > 0].sort_values(ascending=False))
nan_number_test = df_test.isnull().sum()
display(nan_number_test[nan_number_test > 0].sort_values(ascending=False))
cols1 = [
"Alley",
"MasVnrType",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Electrical",
"FireplaceQu",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PoolQC",
"Fence",
"MiscFeature",
]
for col in cols1:
df_train[col].fillna("None", inplace=True)
df_test[col].fillna("None", inplace=True)
cols2 = ["MasVnrArea", "GarageYrBlt", "LotFrontage"]
for col in cols2:
df_train[col].fillna(df_train[col].mode().values[0], inplace=True)
df_test[col].fillna(df_train[col].mode().values[0], inplace=True)
for col in (
"GarageArea",
"GarageCars",
"TotalBsmtSF",
"BsmtFinSF1",
"BsmtUnfSF",
):
df_test[col] = df_test[col].fillna(0)
# Outlier detection
clf = LocalOutlierFactor()
clf.fit(df_train.select_dtypes(np.number).values)
outlier = np.where(clf.negative_outlier_factor_ < -2)[0]
print("outlier:", len(outlier))
print("total:", len(df_train))
print(f"rate: {len(outlier) / len(df_train):.2%}")
df_train = df_train[~df_train.index.isin(outlier)]
# ## feature engineering
keys = df_train.select_dtypes("object")
items = []
for key in keys:
items.append({"name": key, "count": len(df_train[key].unique())})
pd.DataFrame(items).sort_values("count", ascending=False)
keys = df_train.keys()[:-1]
X = df_train[keys].select_dtypes(np.number)
X = pd.concat([X, pd.get_dummies(df_train.select_dtypes("object"))], axis=1)
y = df_train["SalePrice"]
# y = df_train["SalePrice"]
y = np.log1p(df_train["SalePrice"])
rfr = RandomForestRegressor(random_state=42)
rfr.fit(X, y)
from collections import defaultdict
importances = defaultdict(int)
for name, value in zip(X.keys(), rfr.feature_importances_):
importances[name.split("_")[0]] += value
df_importances = pd.DataFrame(
{"feature": importances.keys(), "importances": importances.values()}
).sort_values("importances", ascending=False)
display(df_importances[df_importances["importances"] > 0.005])
features = df_importances[df_importances["importances"] > 0.005]["feature"]
# Data transformation
X_train = pd.concat(
[
df_train[features].select_dtypes(np.number),
pd.get_dummies(df_train[features].select_dtypes("object")),
],
axis=1,
)
# y_train = df_train["SalePrice"]
y_train = np.log1p(df_train["SalePrice"])
X_test = pd.concat(
[
df_test[features].select_dtypes(np.number),
pd.get_dummies(df_test[features].select_dtypes("object")),
],
axis=1,
)
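# Note (a sketch of the inverse transform, not part of the original flow): since
# the target is log1p(SalePrice), predictions on X_test have to be mapped back
# with np.expm1 before they can be read as prices, e.g.
#   predicted_prices = np.expm1(fitted_model.predict(X_test))
# where `fitted_model` stands for whichever regressor is trained below.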
from sklearn.model_selection import cross_val_score
def rmse_cv(model, X, y):
rmse = np.sqrt(
-cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=5)
)
return rmse
rfr = RandomForestRegressor(n_jobs=-1)
rfr.fit(X_train, y_train)
rmse_cv(rfr, X_train, y_train)
# ## Splitting Train dataset
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.2, random_state=41
)
X_test.shape
def rmse_cv(model):
# Define the number of folds for cross-validation
num_folds = 5
# Define the evaluation metric(s) you want to use
scoring = "neg_mean_squared_error" # Example: using negative mean squared error
# Create a k-fold cross-validation object
kf = KFold(
n_splits=num_folds, shuffle=True, random_state=42
) # Example: using 5-fold cross-validation
# Perform k-fold cross-validation and store the evaluation results
rmse_scores = np.sqrt(
-cross_val_score(model, X_train, y_train, scoring=scoring, cv=kf)
)
return rmse_scores
# ## LinearRegression
lr = LinearRegression()
lr.fit(X_train, y_train)
r2 = lr.score(X_val, y_val)
print("R2: ", r2)
y_pred = lr.predict(X_val)
mse = mean_squared_error(y_val, y_pred)
rmse = np.sqrt(mse)
print("LinearRegression RMSE: ", rmse)
rmse_scores = rmse_cv(lr)
# Calculate the mean and standard deviation of the RMSE scores
mean_mse = np.mean(rmse_scores)
std_mse = np.std(rmse_scores)
# Print the evaluation results
print(f"LinearRegression RMSE: {mean_mse}")
print(f"LinearRegression Standard Deviation of RMSE: {std_mse}")
# ## Lasso Regression
lasso = Lasso(alpha=0.1)
lasso.fit(X_train, y_train)
y_pred = lasso.predict(X_val)
mse = mean_squared_error(y_val, y_pred)
rmse = np.sqrt(mse)
print("lasso RMSE:", rmse)
rmse_scores = rmse_cv(lasso)
# Calculate the mean and standard deviation of the RMSE scores
mean_mse = np.mean(rmse_scores)
std_mse = np.std(rmse_scores)
# Print the evaluation results
print(f"lasso RMSE: {mean_mse}")
print(f"lasso Standard Deviation of RMSE: {std_mse}")
# ## Random Forest Regressor
rfr = RandomForestRegressor(random_state=41, n_estimators=500)
rfr.fit(X_train, y_train)
y_pred = rfr.predict(X_val)
mse = mean_squared_error(y_val, y_pred)
rmse = np.sqrt(mse)
print("RandomForestRegressor RMSE: ", rmse)
rmse_scores = rmse_cv(rfr)
# Calculate the mean and standard deviation of the RMSE scores
mean_mse = np.mean(rmse_scores)
std_mse = np.std(rmse_scores)
# Print the evaluation results
print(f"rfr RMSE: {mean_mse}")
print(f"rfr Standard Deviation of RMSE: {std_mse}")
# ### Find the best n_estimators
# set range of n_estimators
n_estimators = range(50, 600, 50)
test_scores = []
# Grid search to find the best n_estimators
best_test_score = float("inf")
best_n_estimators = float("inf")
for n_estimator in n_estimators:
rfr = RandomForestRegressor(n_estimators=n_estimator, random_state=41, n_jobs=-1)
test_score = rmse_cv(rfr)
test_scores.append(np.mean(test_score))
if best_test_score > np.mean(test_score):
best_test_score = np.mean(test_score)
best_n_estimators = n_estimator
print(
"RandomForestRegressor, best_test_score : ",
best_test_score,
",best_n_estimators : ",
best_n_estimators,
)
plt.plot(n_estimators, test_scores)
plt.xlabel("n_estimators")
plt.ylabel("test_scores")
plt.show()
plt.close()
# ### Find the best max_features
# RandomForestRegressor
# Sets the proportion of features used by decision trees in a random forest
max_features = [0.1, 0.3, 0.5, 0.7, 0.9, 0.99]
test_scores = []
# Grid search to find the best max_feature
best_test_score = float("inf")
best_max_feature = float("inf")
for max_feat in max_features:
rfr = RandomForestRegressor(
random_state=41,
n_estimators=550,
max_features=max_feat,
)
test_score = rmse_cv(rfr)
test_scores.append(np.mean(test_score))
if best_test_score > np.mean(test_score):
best_test_score = np.mean(test_score)
best_max_feature = max_feat
print(
"RandomForestRegressor, best_test_score : ",
best_test_score,
",best_max_feature",
best_max_feature,
)
plt.plot(max_features, test_scores)
plt.xlabel("max_features")
plt.ylabel("test_scores")
plt.show()
plt.close()
# RandomForestRegressor
# Sets the range of max depths
max_depths = range(5, 100, 5)
test_scores = []
# Grid search to find the best max_depth
best_test_score = float("inf")
best_max_depth = float("inf")
for max_depth in max_depths:
rfr = RandomForestRegressor(
random_state=41, n_estimators=550, max_features=0.5, max_depth=max_depth
)
test_score = rmse_cv(rfr)
test_scores.append(np.mean(test_score))
if best_test_score > np.mean(test_score):
best_test_score = np.mean(test_score)
best_max_depth = max_depth
print(
"RandomForestRegressor, best_test_score : ",
best_test_score,
",best_max_depth: ",
best_max_depth,
)
plt.plot(max_depths, test_scores)
plt.xlabel("max_depths")
plt.ylabel("test_scores")
plt.show()
plt.close()
# ### Determine the optimal RandomForestRegressor model
# According to the grid search above, the best hyperparameters when using random forests are as below
rfr = RandomForestRegressor(
random_state=41, n_estimators=550, max_features=0.5, max_depth=25
)
rfr.fit(X_train, y_train)
# ### Compare the Base models scores
r_s = 41
my_regressors = [
LinearRegression(),
Lasso(alpha=0.1, random_state=r_s),
RandomForestRegressor(
n_estimators=550, max_features=0.5, max_depth=25, random_state=r_s
),
]
regressors = []
for my_regressor in my_regressors:
regressors.append(my_regressor)
scores_val = []
scores_train = []
RMSE = []
for regressor in regressors:
scores_val.append(regressor.fit(X_train, y_train).score(X_val, y_val))
scores_train.append(regressor.fit(X_train, y_train).score(X_train, y_train))
y_pred = regressor.predict(X_val)
RMSE.append(
np.sqrt(mean_squared_error(np.log(np.expm1(y_val)), np.log(np.expm1(y_pred))))
)
results = zip(scores_val, scores_train, RMSE)
results = list(results)
results_score_val = [item[0] for item in results]
results_score_train = [item[1] for item in results]
results_RMSE = [item[2] for item in results]
df_results = pd.DataFrame(
{
"Algorithms": my_regressors,
"Training Score": results_score_train,
"Validation Score": results_score_val,
"RMSE": results_RMSE,
}
)
df_results
# ### Sort according to RMSE from small to large to find the best model
best_models = df_results.sort_values(by="RMSE")
best_model = best_models.iloc[0][0]
best_stack = best_models["Algorithms"].values
best_models
# ### Average the base models
df_test.shape
X_test.isnull().sum().sort_values(ascending=False)
# Use linear regression, lasso regression and random forest models to make predictions respectively
y_linear = np.expm1(lr.predict(X_test))
y_lasso = np.expm1(lasso.predict(X_test))
y_rfr = np.expm1(rfr.predict(X_test))
# To make the prediction more robust, take the arithmetic mean of the linear regression and random forest predictions as the predicted value (the lasso prediction is computed above but left out of the blend)
y_final = (y_linear + y_rfr) / 2
# ### Submit the results
# submission_data is the final prediction result
submission_data = pd.DataFrame(data={"Id": df_test["Id"], "SalePrice": y_final})
submission_data.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
import os
# wget from https://www.eia.gov/opendata/bulkfiles.php#d-use-common-core-and-extensible-metadata
# I think this is the file to wget
# # Make raw dataframes
def process_txt_line(df_data, df_data_dictionary, category_dict, dict_EBA_line):
if "series_id" in dict_EBA_line:
df_data, df_data_dictionary = augment_data(
df_data, df_data_dictionary, dict_EBA_line
)
elif "category_id" in dict_EBA_line:
category_dict = augment_category(category_dict, dict_EBA_line)
return df_data, df_data_dictionary, category_dict
def augment_category(category_dict, dict_EBA_line):
category_id = dict_EBA_line["category_id"]
category_dict[category_id] = dict_EBA_line
return category_dict
def augment_data(df_data, df_data_dictionary, dict_EBA_line):
series_id = dict_EBA_line["series_id"]
# print(series_id)
df_tmp = pd.DataFrame(dict_EBA_line["data"], columns=["Date", series_id])
df_tmp.index = pd.to_datetime(df_tmp["Date"])
del df_tmp["Date"]
if df_data is None:
df_data = df_tmp
else:
df_data = df_data.merge(df_tmp, left_index=True, right_index=True, how="outer")
df_data_dictionary = augment_data_dictionary(df_data_dictionary, dict_EBA_line)
return df_data, df_data_dictionary
def augment_data_dictionary(df_data_dictionary, dict_EBA_line):
del dict_EBA_line["data"]
if df_data_dictionary is None:
df_data_dictionary = pd.DataFrame(
columns=[
"name",
"start",
"end",
"f",
"geoset_id",
"units",
"last_updated",
"description",
]
)
df_data_dictionary = df_data_dictionary.append(dict_EBA_line, ignore_index=True)
return df_data_dictionary
file_path = "../input/hourly-electricity-data-in-the-us/EBA.txt"
count = 0
df_data = None
df_data_dictionary = None
category_dict = {}
debug = False
with open(file_path, "r") as EBA_file:
for EBA_line in EBA_file:
count += 1
if count % 100 == 0:
print("Line {}:".format(count))
dict_EBA_line = json.loads(EBA_line.strip())
df_data, df_data_dictionary, category_dict = process_txt_line(
df_data, df_data_dictionary, category_dict, dict_EBA_line
)
raw_folder = "/kaggle/working/raw"
if not os.path.exists(raw_folder):
os.mkdir(raw_folder)
df_data.to_csv(f"{raw_folder}/data_electricity_consumption.csv")
df_data_dictionary.index = df_data_dictionary["series_id"]
del df_data_dictionary["series_id"]
df_data_dictionary.to_csv(f"{raw_folder}/data_dictionary_electricity_consumption.csv")
with open(f"{raw_folder}category_mapping.json", "w") as category_json_file:
json.dump(category_dict, category_json_file)
# # Create Processed Files
category_id_list = []
top_level_parent = []
# count = len(parent_categories)
for category_id in category_dict:
category_id_list.append(category_id)
for category_id in category_dict:
if category_dict[category_id]["parent_category_id"] not in category_id_list:
top_level_parent.append(category_id)
top_level_parent
category_map = {}
count = 0
for parent_category in top_level_parent:
category_map[parent_category] = {}
count += 1
for category_id in category_dict:
if category_dict[category_id]["parent_category_id"] in category_map:
parent_category = category_dict[category_id]["parent_category_id"]
category_map[parent_category][category_id] = {}
count += 1
for category_id in category_dict:
for top_level_parent_category in category_map:
if (
category_dict[category_id]["parent_category_id"]
in category_map[top_level_parent_category]
):
parent_category = category_dict[category_id]["parent_category_id"]
category_map[top_level_parent_category][parent_category][category_id] = {}
count += 1
assert count == len(category_dict) # to make sure we have accounted for each mapping
# ## Summary category view
# Showing 2 levels and a summary for the third
for top_level_parent_category in category_map:
print(
"L1 - {} ({}): {}".format(
category_dict[top_level_parent_category]["name"],
top_level_parent_category,
len(category_map[top_level_parent_category]),
)
)
for parent_id in category_map[top_level_parent_category]:
print(
" L2 - {} ({}): {}".format(
category_dict[parent_id]["name"],
parent_id,
len(category_map[top_level_parent_category][parent_id]),
)
)
print("\n")
# ## Full category view
# Full details for all three levels
for top_level_parent_category in category_map:
print(
"L1 - {} ({}): {}".format(
category_dict[top_level_parent_category]["name"],
top_level_parent_category,
len(category_map[top_level_parent_category]),
)
)
for parent_id in category_map[top_level_parent_category]:
print(" L2 - {} ({})".format(category_dict[parent_id]["name"], parent_id))
if (len(category_map[top_level_parent_category][parent_id])) > 0:
for bottom_level_parent_category in category_map[top_level_parent_category][
parent_id
]:
print(
" L3 - {} ({})".format(
category_dict[bottom_level_parent_category]["name"],
bottom_level_parent_category,
)
)
print("\n")
# ## Create Processed CSVs
for top_level_parent_id in category_map:
top_level_dir = f"/kaggle/working/processed/{category_dict[top_level_parent_id]['name']}".strip()
    if not os.path.exists(top_level_dir):
        # makedirs also creates the missing parent "processed" directory
        os.makedirs(top_level_dir)
for mid_level_parent_id in category_map[top_level_parent_id]:
if len(category_map[top_level_parent_id][mid_level_parent_id]) > 0:
mid_level_dir = (
f"{top_level_dir}/{category_dict[mid_level_parent_id]['name']}".strip()
)
if not os.path.exists(mid_level_dir):
os.mkdir(mid_level_dir)
for bottom_level_parent_id in category_map[top_level_parent_id][
mid_level_parent_id
]:
bottom_level_dir = (
f"{mid_level_dir}/{category_dict[bottom_level_parent_id]['name']}"
)
if not os.path.exists(bottom_level_dir):
os.mkdir(bottom_level_dir)
filename = f"{bottom_level_dir}/{category_dict[bottom_level_parent_id]['name']}.csv"
cs_in_df = set(
category_dict[bottom_level_parent_id]["childseries"]
).intersection(df_data.columns)
df_data.loc[:, cs_in_df].to_csv(filename)
else:
filename = (
f"{mid_level_dir}/{category_dict[mid_level_parent_id]['name']}.csv"
)
cs_in_df = set(
category_dict[mid_level_parent_id]["childseries"]
).intersection(df_data.columns)
df_data.loc[:, cs_in_df].to_csv(filename)
|
# # Importance of the Project
# **The COVID-19 pandemic has caused significant disruption globally, and the situation continues to evolve. The analysis and prediction of COVID-19 spread are essential for effective public health policies and prevention strategies. This project aims to analyze COVID-19 data from various sources and develop models to predict the future spread of the virus.**
# ### Dataset Description
# The dataset contains patients' symptoms, which are crucial for identifying COVID-19 infection. The columns are categorical in nature.
# Details of the columns are :
# * ID (Individual ID)
# * Sex (male/female).
# * Age 60 and above (true/false)
# * Test date (date when tested for COVID)
# * Cough (true/false).
# * Fever (true/false).
# * Sore throat (true/false).
# * Shortness of breath (true/false).
# * Headache (true/false).
# * Known contact with an individual confirmed to have COVID-19 (true/false).
# * Corona positive or negative
#
# importing necessary libraries
import numpy
import pandas
import seaborn
import matplotlib.pyplot as plt
from sklearn.impute import KNNImputer
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import (
accuracy_score,
make_scorer,
recall_score,
precision_score,
f1_score,
roc_curve,
auc,
confusion_matrix,
)
import warnings
warnings.filterwarnings("ignore")
# loading the dataset
covid = pandas.read_csv("/kaggle/input/corona-tested-dataset/corona_tested_006.csv")
covid.head() # first 5 rows
covid.tail() # last 5 rows
# **The symptom columns contain both string values ('TRUE'/'FALSE') and booleans (True/False). In English they mean the same thing, but Python treats them as different values, so we normalize them below.**
# defining a function to convert the data
def convert(x):
if x == "TRUE":
return "true"
elif x == True:
return "true"
elif x == "FALSE" or x == False:
return "false"
else:
return x
# mapping the function
covid["Cough_symptoms"] = covid["Cough_symptoms"].map(convert)
covid["Fever"] = covid["Fever"].map(convert)
covid["Sore_throat"] = covid["Sore_throat"].map(convert)
covid["Shortness_of_breath"] = covid["Shortness_of_breath"].map(convert)
covid["Headache"] = covid["Headache"].map(convert)
# renaming a column
covid.rename(columns={"Sex": "Gender"}, inplace=True)
covid.info()
# **In the dataset the value 'None' should be replaced with NaN**
covid.replace({"None": numpy.nan}, inplace=True)
# Finding the total null values
covid.isnull().sum()
for i in covid.columns:
print(f"{i} has", round(covid[i].isnull().sum() * 100 / covid.shape[0], 5), "%")
# dropping nan rows for columns having less than 1% nan
covid.dropna(
subset=[
"Cough_symptoms",
"Fever",
"Sore_throat",
"Shortness_of_breath",
"Headache",
],
axis=0,
inplace=True,
)
# drop age_60_above column
covid.drop("Age_60_above", axis=1, inplace=True)
covid.replace({"other": numpy.nan}, inplace=True)
covid.dropna(subset=["Corona"], axis=0, inplace=True)
# converting the datatype as categorical
for i in covid.columns:
if i == "Ind_ID" or i == "Test_date" or i == "Test_date":
pass
else:
covid[i] = covid[i].astype("category")
covid.info()
# univariate analysis
plt.figure(figsize=(4, 3))
seaborn.countplot(x="Cough_symptoms", hue="Corona", data=covid, width=0.4)
plt.show()
plt.figure(figsize=(4, 3))
seaborn.countplot(x="Fever", hue="Corona", data=covid, width=0.4)
plt.show()
plt.figure(figsize=(4, 3))
seaborn.countplot(x="Sore_throat", hue="Corona", data=covid, width=0.4)
plt.show()
plt.figure(figsize=(4, 3))
seaborn.countplot(x="Shortness_of_breath", hue="Corona", data=covid, width=0.4)
plt.show()
plt.figure(figsize=(4, 3))
seaborn.countplot(x="Headache", hue="Corona", data=covid, width=0.4)
plt.show()
plt.figure(figsize=(4, 3))
seaborn.countplot(x="Gender", hue="Corona", data=covid, width=0.4)
plt.show()
plt.figure(figsize=(4, 3))
seaborn.countplot(x="Known_contact", hue="Corona", data=covid, width=0.4)
plt.show()
covid.isnull().sum()
covid_data = covid.copy(deep=True)
# ## Encoding
# Encoding
covid_data["Cough_symptoms"] = pandas.get_dummies(
covid_data["Cough_symptoms"], drop_first=True
)
covid_data["Fever"] = pandas.get_dummies(covid_data["Fever"], drop_first=True)
covid_data["Sore_throat"] = pandas.get_dummies(
covid_data["Sore_throat"], drop_first=True
)
covid_data["Shortness_of_breath"] = pandas.get_dummies(
covid_data["Shortness_of_breath"], drop_first=True
)
covid_data["Headache"] = pandas.get_dummies(covid_data["Headache"], drop_first=True)
covid_data["Corona"] = pandas.get_dummies(covid_data["Corona"], drop_first=True)
covid_data["Gender"].replace({"male": 0, "female": 1}, inplace=True)
covid_data["Known_contact"].replace(
{"Other": 2, "Contact with confirmed": 1, "Abroad": 0}, inplace=True
)
covid_data.isnull().sum()
# ## Imputation
imputed_data = covid_data.copy(deep=True)
# applying KNN imputation
knn = KNNImputer(n_neighbors=5, weights="uniform")
columns = [
"Cough_symptoms",
"Fever",
"Sore_throat",
"Shortness_of_breath",
"Headache",
"Gender",
"Known_contact",
]
imputed = knn.fit_transform(covid_data[columns])
df = pandas.DataFrame(imputed, columns=columns)
def change(x):
if x > 0.5:
return 1
elif x < 0.5:
return 0
else:
return x
df["Gender"] = df["Gender"].apply(change)
imputed_data["Gender"].iloc[:] = df["Gender"]
imputed_data.head()
imputed_data.info()
imputed_data["Gender"] = imputed_data["Gender"].astype("uint8")
final_data = imputed_data.astype("category")
final_data.info()
# # Feature Engineering
#
# separating features and target
features = final_data.drop(["Ind_ID", "Test_date", "Corona"], axis=1)
target = final_data["Corona"]
# chi2 method to select important k best features
selector = SelectKBest(score_func=chi2, k=6)
X_new = selector.fit_transform(features, target)
idxs_selected = selector.get_support(indices=True)
feat_names = features.columns[idxs_selected]
print(feat_names)
# **covid symptoms are the important features of the dataset**
# splitting train test set
x_train, x_test, y_train, y_test = train_test_split(
features, target, test_size=0.3, random_state=42
)
y_test.value_counts()
y_train.value_counts()
# **1. random forest classifier**
rf = RandomForestClassifier()
rf.fit(x_train, y_train)
pred_rf = rf.predict(x_test)
# accuracy score
accuracy_rf = accuracy_score(y_test, pred_rf)
print("Accuracy:", accuracy_rf * 100)
# ##### Parameter tuning for random forest
rfc = RandomForestClassifier(random_state=42)
param_grid = {
"n_estimators": [50, 100, 200],
"max_depth": [5, 10, 15, 20, None],
"max_features": ["sqrt", "log2"],
"bootstrap": [True, False],
}
scorer = make_scorer(accuracy_score)
grid_obj = GridSearchCV(rfc, param_grid, scoring=scorer)
grid_fit = grid_obj.fit(x_train, y_train)
# Get the best hyperparameters
best_params = grid_fit.best_params_
best_params
# Training the model using the best hyperparameters
rfc_best = RandomForestClassifier(random_state=42, **best_params)
rfc_best.fit(x_train, y_train)
y_pred_rf = rfc_best.predict(x_test)
# Evaluate the best model
accuracy_rf = accuracy_score(y_test, y_pred_rf)
print("Accuracy: {:.10f}%".format(accuracy_rf * 100.0))
# ##### performance measures for random forest
#
# confusion matrix
cm_rf = confusion_matrix(y_test, y_pred_rf)
print("Confusion Matrix:\n", cm_rf)
# precision
prec_rf = cm_rf[0][0] * 100 / (cm_rf[0][0] + cm_rf[0][1])
print("Precision:", prec_rf)
# recall
recall_rf = cm_rf[0][0] * 100 / (cm_rf[0][0] + cm_rf[1][0])
print("recall :", recall_rf)
# F-1 Score
f1_rf = 2 * prec_rf * recall_rf / (recall_rf + prec_rf)
print("F1 Score:", f1_rf)
# False Negative Score
fnr_rf = cm_rf[1][0] * 100 / (cm_rf[0][0] + cm_rf[1][0])
print("False Negative rate : ", fnr_rf)
# AUC ROC curve
fpr, tpr, thresholds = roc_curve(y_test, pred_rf)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, color="darkorange", lw=2, label="ROC curve (area = %0.2f)" % roc_auc)
plt.plot([0, 1], [0, 1], color="navy", lw=2, linestyle="--")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend(loc="lower right")
plt.show()
# **2. Decision Tree Classifier**
dt = DecisionTreeClassifier()
dt.fit(x_train, y_train)
y_pred = dt.predict(x_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100)
# **Parameter tuning for Decision tree model**
dtc = DecisionTreeClassifier(random_state=42)
param_grid = {
"max_depth": [3, 5, 7, 9],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [1, 2, 4],
}
scorer = make_scorer(accuracy_score)
grid = GridSearchCV(dtc, param_grid=param_grid, scoring=scorer, cv=5)
grid.fit(x_train, y_train)
print("Best parameters:", grid.best_params_)
dtc_best = DecisionTreeClassifier(max_depth=7, min_samples_leaf=4, min_samples_split=10)
dtc_best.fit(x_train, y_train)
y_dt = dtc_best.predict(x_test)
accuracy_dtc = accuracy_score(y_test, y_dt)
print("Test accuracy:", accuracy_dtc * 100)
# **Performance measures for Decision Tree Classifier**
# confusion matrix
cm_dt = confusion_matrix(y_test, y_dt)
print("Confusion Matrix:\n", cm_dt)
# precision
prec_dt = cm_dt[0][0] * 100 / (cm_dt[0][0] + cm_dt[0][1])
print("Precision:", prec_dt)
# recall
recall_dt = cm_dt[0][0] * 100 / (cm_dt[0][0] + cm_dt[1][0])
print("recall :", recall_dt)
# F-1 Score
f1_dt = 2 * prec_dt * recall_dt / (recall_dt + prec_dt)
print("F1 Score:", f1_dt)
# False Negative Score
fnr_dt = cm_dt[1][0] * 100 / (cm_dt[0][0] + cm_dt[1][0])
print("False Negative rate : ", fnr_dt)
# AUC ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_dt)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, color="darkorange", lw=2, label="ROC curve (area = %0.2f)" % roc_auc)
plt.plot([0, 1], [0, 1], color="navy", lw=2, linestyle="--")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend(loc="lower right")
plt.show()
# **3. Naive Bayes Classifier**
nb = GaussianNB()
nb.fit(x_train, y_train)
y_pred_nb = nb.predict(x_test)
accuracy_nb = accuracy_score(y_test, y_pred_nb)
accuracy_nb
# **As the Naive Bayes model gives poor accuracy, hyperparameter tuning may not be worthwhile here**
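# **Side note (not part of the original pipeline): GaussianNB assumes continuous, roughly Gaussian features, while the features here are binary/categorical codes. A quick sketch of trying BernoulliNB, which models binary features directly, could look like this:**
from sklearn.naive_bayes import BernoulliNB

# hypothetical comparison run, reusing the existing train/test split
bnb = BernoulliNB()
bnb.fit(x_train, y_train)
print("BernoulliNB accuracy:", accuracy_score(y_test, bnb.predict(x_test)) * 100)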
# **4. Adaboost classifier**
adaboost = AdaBoostClassifier()
adaboost.fit(x_train, y_train)
y_pred_ada = adaboost.predict(x_test)
accuracy_ada = accuracy_score(y_test, y_pred_ada)
accuracy_ada
# **Parameter tuning for adaboost model**
adaboost_params = {"n_estimators": [50, 100, 200], "learning_rate": [0.1, 0.01, 0.001]}
adaboost_grid = GridSearchCV(adaboost, adaboost_params, cv=5)
adaboost_grid.fit(x_train, y_train)
print("Best Hyperparameters for AdaBoost: ", adaboost_grid.best_params_)
adaboost = AdaBoostClassifier(learning_rate=0.1, n_estimators=200)
adaboost.fit(x_train, y_train)
y_pred_ada = adaboost.predict(x_test)
accuracy_ada = accuracy_score(y_test, y_pred_ada)
accuracy_ada
# **Performance measures of Adaboost model**
# confusion matrix
cm_ada = confusion_matrix(y_test, y_pred_ada)
print("Confusion Matrix:\n", cm_ada)
# precision
prec_ada = cm_ada[0][0] * 100 / (cm_ada[0][0] + cm_ada[0][1])
print("Precision:", prec_ada)
# recall
recall_ada = cm_ada[0][0] * 100 / (cm_ada[0][0] + cm_ada[1][0])
print("recall :", recall_rf)
# F-1 Score
f1_ada = 2 * prec_ada * recall_ada / (recall_ada + prec_ada)
print("F1 Score:", f1_rf)
# False Negative Score
fnr_ada = cm_ada[1][0] * 100 / (cm_ada[0][0] + cm_ada[1][0])
print("False Negative rate : ", fnr_ada)
# AUC ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_pred_ada)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, color="darkorange", lw=2, label="ROC curve (area = %0.2f)" % roc_auc)
plt.plot([0, 1], [0, 1], color="navy", lw=2, linestyle="--")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend(loc="lower right")
plt.show()
# **5. Support Vector Machine Classifier**
svc = SVC()
svc.fit(x_train, y_train)
pred_svc = svc.predict(x_test)
acc_svc = accuracy_score(pred_svc, y_test)
acc_svc * 100
# confusion matrix
cm_svc = confusion_matrix(y_test, pred_svc)
print("Confusion Matrix:\n", cm_svc)
# precision
prec_svc = cm_svc[0][0] * 100 / (cm_svc[0][0] + cm_svc[0][1])
print("Precision:", prec_svc)
# recall
recall_svc = cm_svc[0][0] * 100 / (cm_svc[0][0] + cm_svc[1][0])
print("recall :", recall_svc)
# F-1 Score
f1_svc = 2 * prec_svc * recall_svc / (recall_svc + prec_svc)
print("F1 Score:", f1_svc)
# False Negative Score
fnr_svc = cm_svc[1][0] * 100 / (cm_svc[0][0] + cm_svc[1][0])
print("False Negative rate : ", fnr_svc)
# AUC ROC curve
fpr, tpr, thresholds = roc_curve(y_test, pred_svc)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, color="darkorange", lw=2, label="ROC curve (area = %0.2f)" % roc_auc)
plt.plot([0, 1], [0, 1], color="navy", lw=2, linestyle="--")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend(loc="lower right")
plt.show()
df = pandas.DataFrame(
{
"random forest": [accuracy_rf * 100, prec_rf, recall_rf, f1_rf, fnr_rf, 0.79],
"naive bayes": [accuracy_nb * 100, "nan", "nan", "nan", "nan", "nan"],
"Adaboost": [accuracy_ada * 100, prec_ada, recall_ada, f1_ada, fnr_ada, 0.67],
"Decision Tree": [accuracy_dtc * 100, prec_dt, recall_dt, f1_dt, fnr_dt, 0.77],
"SVC": [acc_svc * 100, prec_svc, recall_svc, f1_svc, fnr_svc, 0.77],
},
index=[
"Accuracy",
"Precision",
"Recall",
"F-1 Score",
"False Negative Rate",
"ROC curve area",
],
)
df
|
# # Image Matching 2023 rotation matrix
# The rotation_matrix and translation_vector represent 3D geometric transformations. They are commonly composed, for example by rotating a 3D object and then translating it; by combining these transformations you can change the position and pose of 3D objects.
# ## rotation_matrix:
# A rotation matrix is a matrix for rotating a point or object in 3D space. It is usually represented as a 3x3 square matrix. A rotation matrix is defined by an axis of rotation and an angle. Rotation matrices allow you to rotate 3D points and objects.
# ## translation_vector:
# A translation vector is a vector for translating a point or object in 3D space. It is usually represented as a 3-dimensional column vector. Translation vectors describe the respective amounts of translation along the x-, y-, and z-axes. A translation vector allows you to translate a 3D point or object in a specified direction.
#
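# A minimal sketch (made-up numbers, not data from this competition) of how the two are combined:
# a 3D point p is first rotated and then translated, p' = R @ p + t.
import numpy as np

theta = np.pi / 4  # example: rotate 45 degrees around the z-axis
R_example = np.array(
    [
        [np.cos(theta), -np.sin(theta), 0.0],
        [np.sin(theta), np.cos(theta), 0.0],
        [0.0, 0.0, 1.0],
    ]
)
t_example = np.array([1.0, 2.0, 3.0])  # example translation along x, y, z
p = np.array([1.0, 0.0, 0.0])
print(R_example @ p + t_example)  # the transformed point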
import os
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
train_labels = pd.read_csv(
"/kaggle/input/image-matching-challenge-2023/train/train_labels.csv"
)
display(train_labels[0:3])
data = train_labels[
    (train_labels["dataset"] == "urban")
    & (train_labels["scene"] == "kyiv-puppet-theater")
]
display(data[0:3])
print(data.columns.tolist())
# # images
paths = []
files = []
for dirname, _, filenames in os.walk(
"/kaggle/input/image-matching-challenge-2023/train/urban/kyiv-puppet-theater/images"
):
for filename in filenames:
paths += [(os.path.join(dirname, filename))]
files += [filename]
fig, axs = plt.subplots(5, 6, figsize=(12, 12))
for i, ax in enumerate(axs.flat):
if i < len(paths):
img = mpimg.imread(paths[i])
ax.imshow(img)
ax.axis("off")
plt.show()
full_paths = []
full_files = []
for dirname, _, filenames in os.walk(
"/kaggle/input/image-matching-challenge-2023/train/urban/kyiv-puppet-theater/images_full_set"
):
for filename in filenames:
full_paths += [(os.path.join(dirname, filename))]
full_files += [filename]
print(len(files), len(full_files))
print(set(files) & set(full_files) == set(files))
# fig, axs = plt.subplots(5, 6, figsize=(10, 10))
# for i, ax in enumerate(axs.flat):
# if i < len(full_paths):
# img = mpimg.imread(full_paths[i])
# ax.imshow(img)
# ax.axis('off')
# plt.show()
# # train_labels info
dir0 = "/kaggle/input/image-matching-challenge-2023/train/"
print(data.iloc[0, 2]) #'image_path'
print(data.iloc[0, 3]) #'rotation_matrix'
print(data.iloc[0, 4]) #'translation_vector'
path0 = dir0 + data.iloc[0, 2]
img = mpimg.imread(path0)
plt.imshow(img)
plt.axis("off")
plt.show()
matrixs = data.iloc[0, 3].split(";")
M = []
for m in matrixs:
M += [float(m)]
rotation_matrix = np.array(M).reshape(3, 3)
print(rotation_matrix)
# The 3x3 rotation matrix can be read as the composition (ZYX Euler convention)
# R = Rz(theta_z) @ Ry(theta_y) @ Rx(theta_x), where
# theta_x represents the rotation angle around the x-axis.
# theta_y represents the rotation angle around the y-axis.
# theta_z represents the rotation angle around the z-axis.
import math
R = rotation_matrix
theta_x = math.atan2(R[2, 1], R[2, 2])
theta_y = math.asin(-R[2, 0])
theta_z = math.atan2(R[1, 0], R[0, 0])
print(theta_x, theta_y, theta_z)
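# Sanity check (assuming the ZYX Euler convention implied by the formulas above): rebuilding
# R as Rz(theta_z) @ Ry(theta_y) @ Rx(theta_x) should approximately reproduce rotation_matrix.
def euler_zyx_to_matrix(tx, ty, tz):
    Rx = np.array(
        [[1, 0, 0], [0, math.cos(tx), -math.sin(tx)], [0, math.sin(tx), math.cos(tx)]]
    )
    Ry = np.array(
        [[math.cos(ty), 0, math.sin(ty)], [0, 1, 0], [-math.sin(ty), 0, math.cos(ty)]]
    )
    Rz = np.array(
        [[math.cos(tz), -math.sin(tz), 0], [math.sin(tz), math.cos(tz), 0], [0, 0, 1]]
    )
    return Rz @ Ry @ Rx


print(np.allclose(euler_zyx_to_matrix(theta_x, theta_y, theta_z), R, atol=1e-6))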
matrix = rotation_matrix
is_square = matrix.shape[0] == matrix.shape[1]
is_orthogonal = np.allclose(np.dot(matrix, matrix.T), np.eye(3)) and np.allclose(
np.linalg.norm(matrix, axis=0), 1
)
is_determinant_one = np.isclose(np.linalg.det(matrix), 1)
is_rotation_matrix = is_square and is_orthogonal and is_determinant_one
print("is_rotation_matrix:", is_rotation_matrix)
vectors = data.iloc[0, 4].split(";")
V = []
for v in vectors:
V += [float(v)]
translation_vector = np.array(V)
print(translation_vector)
# # rotate image to create rotated image
# before
plt.imshow(img)
plt.axis("off")
plt.show()
# theta_z represents the rotation angle around the z-axis.
height, width = img.shape[:2]
theta_z = np.pi / 6
R_z = np.array(
[
[np.cos(theta_z), -np.sin(theta_z), 0],
[np.sin(theta_z), np.cos(theta_z), 0],
[0, 0, 1],
]
)
rotated_img2 = cv2.warpAffine(img, R_z[:2, :], (width, height))
plt.imshow(rotated_img2)
plt.axis("off")
plt.show()
# apply the given rotation_matrix to the image, using its top 2x3 block as an affine warp (illustrative only: a 3D camera rotation is not a 2D image warp)
rotated_img = cv2.warpAffine(img, rotation_matrix[:2, :], (width, height))
plt.imshow(rotated_img)
plt.axis("off")
plt.show()
# camera text
camera = pd.read_csv(
"/kaggle/input/image-matching-challenge-2023/train/urban/kyiv-puppet-theater/sfm/cameras.txt",
sep=" ",
)
display(camera)
# images txt
with open(
"/kaggle/input/image-matching-challenge-2023/train/urban/kyiv-puppet-theater/sfm/images.txt",
"r",
) as file:
file_contents = file.read()
print(file_contents[:1000])
# points3D txt
with open(
"/kaggle/input/image-matching-challenge-2023/train/urban/kyiv-puppet-theater/sfm/points3D.txt",
"r",
) as file:
file_contents = file.read()
print(file_contents[:1000])
# scale_factor txt
with open(
"/kaggle/input/image-matching-challenge-2023/train/urban/kyiv-puppet-theater/sfm/scale_factor.txt",
"r",
) as file:
file_contents = file.read()
print(file_contents[:10])
|
import numpy as np
import pandas as pd
import os
import tensorflow as tf
from tensorflow.keras.layers import (
Input,
MaxPooling2D,
BatchNormalization,
GlobalAveragePooling2D,
Dense,
Conv2D,
Dropout,
Flatten,
Activation,
ZeroPadding2D,
Add,
)
from tensorflow.keras import activations
from tensorflow.keras.models import Model
def res_identity(x, filters):
x_skip = x # this will be used for addition with the residual block
f1, f2 = filters
# first block
x = Conv2D(f1, kernel_size=(1, 1), strides=(1, 1), padding="valid")(x)
x = BatchNormalization()(x)
x = Activation(activations.relu)(x)
# second block # bottleneck (but size kept same with padding)
x = Conv2D(f1, kernel_size=(3, 3), strides=(1, 1), padding="SAME")(x)
x = BatchNormalization()(x)
x = Activation(activations.relu)(x)
# third block activation used after adding the input
x = Conv2D(f2, kernel_size=(1, 1), strides=(1, 1), padding="valid")(x)
x = BatchNormalization()(x)
# x = Activation(activations.relu)(x)
# add the input
x = Add()([x, x_skip])
x = Activation(activations.relu)(x)
return x
def res_conv(x, s, filters):
x_skip = x
f1, f2 = filters
# first block
x = Conv2D(f1, kernel_size=(1, 1), strides=(s, s), padding="valid")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
# second block
x = Conv2D(f1, kernel_size=(3, 3), strides=(1, 1), padding="SAME")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
# third block
x = Conv2D(f2, kernel_size=(1, 1), strides=(1, 1), padding="valid")(x)
x = BatchNormalization()(x)
# shortcut
x_skip = Conv2D(f2, kernel_size=(1, 1), strides=(s, s), padding="valid")(x_skip)
x_skip = BatchNormalization()(x_skip)
# add
x = Add()([x, x_skip])
x = Activation(activations.relu)(x)
return x
def squeeze_and_excite(tensor):
    # squeeze: global average pooling collapses each channel to a single value
    x = GlobalAveragePooling2D()(tensor)
    # excite: a small bottleneck MLP (reduction ratio 8) produces per-channel weights in [0, 1]
    x2 = Dense(x.shape[1] // 8, activation="relu")(x)
    x2 = Dense(x.shape[1], activation="sigmoid")(x2)
    # reshape the weights to (1, 1, channels) so they broadcast over the spatial dimensions
    x2 = tf.keras.layers.Reshape((1, 1, x.shape[1]), input_shape=(x.shape[1],))(x2)
    # scale: reweight the input feature map channel-wise
    x = tf.keras.layers.multiply((x2, tensor))
    return x
inputs = Input(shape=(224, 224, 3))
x = ZeroPadding2D(padding=(3, 3))(inputs)
x = Conv2D(64, kernel_size=(7, 7), strides=(2, 2))(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = res_conv(x, s=1, filters=(64, 256))
x = res_identity(x, filters=(64, 256))
x = res_identity(x, filters=(64, 256))
x = squeeze_and_excite(x)
x = res_conv(x, s=2, filters=(128, 512))
x = res_identity(x, filters=(128, 512))
x = res_identity(x, filters=(128, 512))
x = res_identity(x, filters=(128, 512))
x = squeeze_and_excite(x)
x = res_conv(x, s=2, filters=(256, 1024))
x = res_identity(x, filters=(256, 1024))
x = res_identity(x, filters=(256, 1024))
x = res_identity(x, filters=(256, 1024))
x = res_identity(x, filters=(256, 1024))
x = res_identity(x, filters=(256, 1024))
x = squeeze_and_excite(x)
x = res_conv(x, s=2, filters=(512, 2048))
x = res_identity(x, filters=(512, 2048))
x = res_identity(x, filters=(512, 2048))
x = GlobalAveragePooling2D()(x)
outputs = Dense(11, activation="sigmoid")(x) # multi-class
model = Model(inputs=inputs, outputs=outputs, name="Resnet50")
model.summary()
# # Here is the pre-trained ResNet50 from Keras
pretrained = tf.keras.applications.ResNet50(include_top=False, weights="imagenet")(
inputs
)
x = GlobalAveragePooling2D()(pretrained)
outputs = Dense(11, activation="sigmoid")(x)
resnet50 = Model(inputs=inputs, outputs=outputs, name="Resnet50_Keras")
resnet50.summary()
|
# ## About the problem
# We are provided a dataset with synthetic urine specimens, and the aim of the competition is to predict the probability of finding a kidney stone in the specimen.
# The dataset is generated from a DL model trained on another dataset ('Kidney Stone Prediction based on Urine Analysis') with some differences; we are allowed to use the original dataset if necessary.
# ## Description of the columns
# - gravity: *specific gravity*, the density relative to water, i.e. $\rho_{urine}/\rho_{water}$.
# - ph: *pH*, $-\log([H^+])$, measures the acidity of the solution.
# - osmo: *osmolarity*, it is proportional to the concentration of molecules in solution, measured in units of mOsm.
# - cond: *conductivity*, it is proportional to the concentration of charged ions in solution, in units of mMho. Here, mho is the inverse of the ohm.
# - urea: *urea concentration*, in millimoles per litre.
# - calc: *calcium concentration*, in millimoles per litre.
# - target: 1 for kidney stone, 0 for no kidney stone.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
import xgboost as xgb
data = pd.read_csv("../input/playground-series-s3e12/train.csv", index_col="id")
test = pd.read_csv("../input/playground-series-s3e12/test.csv", index_col="id")
# ## Cleaning
# - Check for duplicated rows.
# - Check for null values.
print(f"Duplicates: {data.duplicated().sum()}")
print(f"Null:\n{data.isnull().sum()}")
# ## Data Analysis
sns.pairplot(data)
sns.pairplot(test)
cm = data.corr()
plt.figure(figsize=(12, 6))
sns.heatmap(cm, annot=True, square=True, linewidths=1, linecolor="gray")
plt.show()
# ## Metric
# The competition uses the area under the ROC curve between the predicted probability and the observed target as a metric.
# Fortunately, sklearn already has this metric implemented for us, `sklearn.metrics.roc_auc_score`.
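# A tiny illustration (toy numbers, not competition data) of how the metric is called:
# it takes the observed 0/1 targets and the predicted probabilities of the positive class.
from sklearn.metrics import roc_auc_score

print(roc_auc_score([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8]))  # -> 0.75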
X = data
y = X.pop("target")
model = xgb.XGBRegressor(
n_estimators=1000, eta=0.02, random_state=1, objective="binary:logistic"
)
model.fit(X, y)
results = model.predict(test)
sub = pd.DataFrame(index=test.index, data=results, columns=["target"])
sub.head()
sub.to_csv(r"submission.csv")
|
import pandas as pd
import numpy as np
def number_to_filename(number):
filename = f"{number:06d}.png"
path = "/kaggle/input/spr-x-ray-gender/kaggle/kaggle/train/"
filename = path + filename
return filename
train_gender_df = pd.read_csv("/kaggle/input/spr-x-ray-gender/train_gender.csv")
train_gender_df["filepath"] = train_gender_df["imageId"].apply(number_to_filename)
train_gender_df.head(10)
train_gender_df["gender"].hist()
from matplotlib import pyplot as plt
import cv2
img = cv2.imread(train_gender_df["filepath"][0], 0)  # flag 0 loads the image as grayscale
plt.imshow(img, cmap="gray")
plt.title(str(train_gender_df["gender"][0]))
plt.show()
img = cv2.imread(train_gender_df["filepath"][1], 0)
plt.imshow(img, cmap="gray")
plt.title(str(train_gender_df["gender"][1]))
plt.show()
import os
import shutil
import tempfile
import matplotlib.pyplot as plt
import PIL
import torch
import numpy as np
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import torchvision.transforms as transforms
from monai.data.image_reader import PILReader
from monai.apps import download_and_extract
from monai.config import print_config
from monai.data import decollate_batch, DataLoader
from monai.metrics import ROCAUCMetric
from monai.networks.nets import DenseNet121
from monai.transforms import (
Activations,
EnsureChannelFirst,
AsDiscrete,
Compose,
LoadImage,
Lambdad,
RandFlip,
RandRotate,
RandZoom,
Resize,
ScaleIntensity,
)
from monai.utils import set_determinism
print_config()
num_class = 2
train_transforms = Compose(
[
LoadImage(
reader=PILReader(converter=lambda image: image.convert("L")),
image_only=True,
),
EnsureChannelFirst(),
Resize(spatial_size=(256, 256)),
ScaleIntensity(),
RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True),
RandFlip(spatial_axis=0, prob=0.5),
RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5),
]
)
val_transforms = Compose(
[
LoadImage(
reader=PILReader(converter=lambda image: image.convert("L")),
image_only=True,
),
EnsureChannelFirst(),
Resize(spatial_size=(256, 256)),
ScaleIntensity(),
]
)
y_pred_trans = Compose([Activations(softmax=True)])
y_trans = Compose([AsDiscrete(to_onehot=num_class)])
filenames = train_gender_df["filepath"].tolist()
target = train_gender_df["gender"].tolist()
train_x, val_x, train_y, val_y = train_test_split(
filenames, target, stratify=target, test_size=0.3, random_state=0
)
test_df = pd.read_csv("/kaggle/input/spr-x-ray-gender/sample_submission_gender.csv")
def testnumber_to_filename(number):
filename = f"{number:06d}.png"
path = "/kaggle/input/spr-x-ray-gender/kaggle/kaggle/test/"
filename = path + filename
return filename
test_df["filepath"] = test_df["imageId"].apply(testnumber_to_filename)
test_x = test_df["filepath"].tolist()
test_y = test_df["gender"].tolist()
class SPRXrayGenderDataset(torch.utils.data.Dataset):
def __init__(self, image_files, labels, transforms):
self.image_files = image_files
self.labels = labels
self.transforms = transforms
def __len__(self):
return len(self.image_files)
def __getitem__(self, index):
return self.transforms(self.image_files[index]), self.labels[index]
train_ds = SPRXrayGenderDataset(train_x, train_y, train_transforms)
train_loader = DataLoader(train_ds, batch_size=256, shuffle=True)
val_ds = SPRXrayGenderDataset(val_x, val_y, val_transforms)
val_loader = DataLoader(val_ds, batch_size=256)
test_ds = SPRXrayGenderDataset(test_x, test_y, val_transforms)
test_loader = DataLoader(test_ds, batch_size=256)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=num_class).to(device)
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), 1e-5)
max_epochs = 50
val_interval = 1
auc_metric = ROCAUCMetric()
best_metric = -1
best_metric_epoch = -1
epoch_loss_values = []
metric_values = []
for epoch in range(max_epochs):
print("-" * 10)
print(f"epoch {epoch + 1}/{max_epochs}")
model.train()
epoch_loss = 0
step = 0
for batch_data in train_loader:
step += 1
inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
print(
f"{step}/{len(train_ds) // train_loader.batch_size}, "
f"train_loss: {loss.item():.4f}"
)
epoch_len = len(train_ds) // train_loader.batch_size
epoch_loss /= step
epoch_loss_values.append(epoch_loss)
print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
if (epoch + 1) % val_interval == 0:
model.eval()
with torch.no_grad():
y_pred = torch.tensor([], dtype=torch.float32, device=device)
y = torch.tensor([], dtype=torch.long, device=device)
for val_data in val_loader:
val_images, val_labels = (
val_data[0].to(device),
val_data[1].to(device),
)
y_pred = torch.cat([y_pred, model(val_images)], dim=0)
y = torch.cat([y, val_labels], dim=0)
y_onehot = [y_trans(i) for i in decollate_batch(y, detach=False)]
y_pred_act = [y_pred_trans(i) for i in decollate_batch(y_pred)]
auc_metric(y_pred_act, y_onehot)
result = auc_metric.aggregate()
auc_metric.reset()
del y_pred_act, y_onehot
metric_values.append(result)
acc_value = torch.eq(y_pred.argmax(dim=1), y)
acc_metric = acc_value.sum().item() / len(acc_value)
if result > best_metric:
best_metric = result
best_metric_epoch = epoch + 1
torch.save(model.state_dict(), "best_metric_model.pth")
print("saved new best metric model")
print(
f"current epoch: {epoch + 1} current AUC: {result:.4f}"
f" current accuracy: {acc_metric:.4f}"
f" best AUC: {best_metric:.4f}"
f" at epoch: {best_metric_epoch}"
)
print(
f"train completed, best_metric: {best_metric:.4f} " f"at epoch: {best_metric_epoch}"
)
plt.figure("train", (12, 6))
plt.subplot(1, 2, 1)
plt.title("Epoch Average Loss")
x = [i + 1 for i in range(len(epoch_loss_values))]
y = epoch_loss_values
plt.xlabel("epoch")
plt.plot(x, y)
plt.subplot(1, 2, 2)
plt.title("Val AUC")
x = [val_interval * (i + 1) for i in range(len(metric_values))]
y = metric_values
plt.xlabel("epoch")
plt.plot(x, y)
plt.show()
model.load_state_dict(torch.load("best_metric_model.pth"))
model.eval()
y_true = []
y_pred = []
with torch.no_grad():
for test_data in test_loader:
test_images, test_labels = (
test_data[0].to(device),
test_data[1].to(device),
)
pred = model(test_images).argmax(dim=1)
for i in range(len(pred)):
y_true.append(test_labels[i].item())
y_pred.append(pred[i].item())
test_df["gender"] = y_pred
test_df[["imageId", "gender"]].to_csv("submission.csv", index=False)
|
import warnings
import nltk
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from gensim.models import Word2Vec, KeyedVectors
warnings.filterwarnings("ignore")
# # stemming
word = ["change", "changing", "change", "changed"]
word
#!pip install nltk
ps = PorterStemmer()
for w in word:
print(ps.stem(w))
for w in word:
print(w, ps.stem(w))
sen = "I want to change the world if world changed my career by changing abcd"
nltk.download("punkt")
toke = word_tokenize(sen)
toke
sen.split()  # or we can split this using the split function
for w in toke:
print(w, ps.stem(w))
le = WordNetLemmatizer()
toke
nltk.download("omw-1.4")
for w in toke:
print(w, le.lemmatize(w))
le.lemmatize("changes")
# # Text Vectorizer
df = pd.read_csv("/kaggle/input/vactorizer-dataset1/data.csv")
df
# # CountVectorizer
cv = CountVectorizer()
cv_x = cv.fit_transform(df["test"])
cv_x
cv_x.toarray()
cv.get_feature_names()
cv_df = pd.DataFrame(cv_x.toarray(), columns=cv.get_feature_names(), index=df["test"])
cv_df
cv_df = pd.DataFrame(cv_x.toarray(), columns=cv.get_feature_names())
cv_df
# # TfidfVectorizer
tf = TfidfVectorizer()
tf_z = tf.fit_transform(df["test"])
tf_z
cv_df = pd.DataFrame(tf_z.toarray(), columns=tf.get_feature_names(), index=df["test"])
cv_df
#!pip install gensim
text_vector = [nltk.word_tokenize(test) for test in df["test"]]
text_vector
model = Word2Vec(text_vector, min_count=1)
model.wv.most_similar("want")
|
# # Introduction
# In this notebook we will analyse word2vec embeddings trained on countries' Wikipedia pages.
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt
from gensim.models.callbacks import CallbackAny2Vec
from gensim.models import Word2Vec, KeyedVectors
from tensorflow.keras.layers import Embedding
# imports
import json
from collections import Counter
from gensim.models import Word2Vec
import matplotlib.pyplot as plt
import json
# # Basic Analysis of Data
class MetricCallback(CallbackAny2Vec):
"""
Callback to print loss after each epoch
"""
def __init__(self, every=10):
self.myloss = []
self.epoch = 0
self.every = every
def on_epoch_end(self, model):
loss = model.get_latest_training_loss()
if self.epoch == 0:
self.myloss.append(loss)
else:
self.myloss.append(loss - self.loss_previous_step)
if self.epoch % self.every == 0:
print(f"Loss after epoch {self.epoch}: {self.myloss[-1]}") # NOQA: T001
self.epoch += 1
self.loss_previous_step = loss
def plot_arrows(starts, ends, wv, estimator=PCA, **kwargs):
if len(starts) != len(ends):
raise ValueError("starts and ends must be the same length.")
fig, ax = plt.subplots(figsize=kwargs.pop("figsize", (8, 8)))
X = wv[starts + ends] # NOQA: N806
x_red = estimator(n_components=2).fit_transform(X)
plt.scatter(*x_red.T)
for i, word in enumerate(starts + ends):
plt.annotate(word, x_red[i])
xstart = x_red[: len(starts)]
xend = x_red[len(starts) :]
for i, (start, end) in enumerate(zip(starts, ends)):
x1, y1 = xstart[i]
x2, y2 = xend[i]
plt.arrow(x1, y1, x2 - x1, y2 - y1)
def plot_vectors(words, model, estimator=TSNE, **kwargs):
names = []
vectors = []
for word in words:
if word in model.wv:
names.append(word)
vectors.append(model.wv[word])
X = np.r_[vectors] # NOQA: N806
x_red = estimator(n_components=2).fit_transform(X)
fig, ax = plt.subplots(figsize=kwargs.pop("figsize", (16, 16))) # NOQA: E912
ax.scatter(*x_red.T)
for i, word in enumerate(names):
plt.annotate(word, x_red[i])
def make_embedding_layer(model, tokenizer, MAX_SEQUENCE_LENGTH): # NOQA: N803
word_index = tokenizer.word_index
if isinstance(model, Word2Vec):
wv = model.wv
elif isinstance(model, KeyedVectors):
wv = model
embedding_matrix = np.zeros((len(word_index) + 1, wv.vector_size))
for word, i in word_index.items():
try:
vector = wv.get_vector(word, False)
embedding_matrix[i] = vector
except KeyError:
continue
el = Embedding(
len(word_index) + 1,
wv.vector_size,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False,
)
return el
file_path = "/kaggle/input/countries-with-wikipedia-content/countries.json"
with open(file_path, "r") as file:
file_content = file.read()
# Load the JSON data from the file content
countries = json.loads(file_content)
countries["India"][:20]
print(" ".join(countries["India"])[:512] + " ...")
for i, (country, text) in enumerate(countries.items()):
print(country)
print(" ".join(text)[:512] + " ...")
print("-" * 100)
if i >= 5:
break
# # Basic Word2Vec Usage
# Create and train a simple model
model = Word2Vec(sentences=countries.values())
# Check word similarities learnt by the model
model.wv.most_similar("India", topn=5)
# Enable computation of loss
model = Word2Vec(sentences=countries.values(), compute_loss=True)
model.get_latest_training_loss()
# # Heuristics for Word2vec algorithms
# How many unique words in the vocabulary?
counter = Counter()
for words in countries.values():
for word in words:
counter.update([word])
print(len(counter))
# Default vocabulary size of the original model
print(len(model.wv.index_to_key))
# Retrain - increased vocabulary size, larger word vectors
# Note: Here MetricCallback is directly called as there is no utils package here.
metric = MetricCallback(every=1)
model = Word2Vec(
sentences=countries.values(),
vector_size=128,
max_vocab_size=65536,
compute_loss=True,
callbacks=[metric],
)
plt.plot(metric.myloss)
# Check similarities again
model.wv.most_similar("India")
# Retrain - more epochs
# Note: Here MetricCallback is directly called as there is no utils package here.
metric = MetricCallback(every=1)
model = Word2Vec(
sentences=countries.values(),
vector_size=128,
max_vocab_size=65536,
compute_loss=True,
callbacks=[metric],
epochs=10,
min_alpha=0.001,
workers=9,
)
plt.plot(metric.myloss)
model.wv.most_similar("India")
# Examine the vector space
X = ["India", "Pakistan", "Bangladesh", "France", "England", "Spain"]
Y = ["Delhi", "Islamabad", "Dhaka", "Paris", "London", "Madrid"]
plot_arrows(X, Y, model.wv)
# Visualize vectors for all countries
plot_vectors(countries, model)
# # Word Analogies
# India: Ganges -> Brazil: __ ?
model.wv.most_similar(positive=["Ganges", "Brazil"], negative=["India"])
# America: Washington -> France: __ ?
model.wv.most_similar(positive=["Washington", "France"], negative=["America"])
# India: Hindi -> Germany: __ ?
model.wv.most_similar(positive=["Hindi", "Germany"], negative=["India"])
# Save the model
model.save("wiki-countries.w2v")
from gensim.models import KeyedVectors
model = KeyedVectors.load("wiki-countries.w2v")
model
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **importing the file**
file = pd.read_csv("/kaggle/input/netflix-shows/netflix_titles.csv")
file.head()
# **checking the info of the file**
file.info()
# **checking the count of null values**
file.isnull().sum()
# **dealing with the null values**
file["director"].fillna("director unavailable", inplace=True)
file["cast"].fillna("cast unavailable", inplace=True)
file["country"].fillna("country unavailable", inplace=True)
file.dropna(subset=["date_added", "rating", "duration"], inplace=True)
file.info()
# **changing the data types to reduce the memory usage**
file["type"].value_counts()
file["date_added"] = pd.to_datetime(file["date_added"])
# file["release_year"] = pd.to_datetime(file["release_year"])
file["type"] = file["type"].astype("category")
file.info()
# **checking for duplicate rows and columns**
file.drop_duplicates()
# **visualizing the data**
import matplotlib.pyplot as plt
import seaborn as sns
plt.pie(
file["type"].value_counts(),
labels=file["type"].unique(),
autopct="%0.f%%",
colors=["lightblue", "blue"],
)
plt.show()
# The above pie chart shows that about 70% of the content available on Netflix is movies and the remaining 30% is TV shows
#
y = file["type"].value_counts()
x = file["type"].unique()
plt.bar(x, y, color="grey")
# **top 10 content producing countries on netflix**
top_country = file["country"].value_counts().sort_values(ascending=False).head(10)
top_country.plot(kind="barh", color="grey")
# **top 10 movie producing countries**
movies = file[file["type"] == "Movie"]
top_movie_producing_country = (
movies.groupby("country")["show_id"].count().sort_values(ascending=False).head(10)
)
top_movie_producing_country.plot(kind="barh", color="grey")
# **top 10 TV shows producing countries**
TV_shows = file[file["type"] == "TV Show"]
top_TV_Show_producing_country = (
TV_shows.groupby("country")["show_id"].count().sort_values(ascending=False).head(10)
)
top_TV_Show_producing_country.plot(kind="barh", color="grey")
# **growth of movies on netflix**
movies = file[file["type"] == "Movie"]
growth = movies.groupby("release_year")["show_id"].count()
growth.plot(kind="line", color="red")
plt.show()
# **growth of TV shows on netflix**
TV_shows = file[file["type"] == "TV Show"]
growth = TV_shows.groupby("release_year")["show_id"].count()
growth.plot(kind="line", color="red")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
from platform import python_version
python_version()
# Some stuff from the Feature Engineering Course:
from sklearn.feature_selection import mutual_info_regression
def make_mi_scores(X, y, discrete_features):
mi_scores = mutual_info_regression(X, y, discrete_features=discrete_features)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
# Some relevant info from Google: the female mosquito lifetime is 42-56 days, the male's about 10 days.
# That is approximately 7 weeks for female mosquitoes, so we probably don't need to consider lags beyond that for weather features (see the lag sketch below).
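# A minimal sketch of building such weekly lag features with pandas .shift; the column name is
# just an example from this dataset, and the actual lag features are built further below with make_lags.
def make_weekly_lags(series, n_lags=7):
    # shift the series back by 1..n_lags weeks so each row sees up to n_lags weeks of history
    return pd.concat(
        {f"{series.name}_lag_{k}": series.shift(k) for k in range(1, n_lags + 1)}, axis=1
    )


# e.g. make_weekly_lags(dengue_features_train["station_min_temp_c"], n_lags=7)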
dengue_features_train = pd.read_csv(
"../input/epidemy/dengue_features_train.csv",
parse_dates=True,
infer_datetime_format=True,
)
dengue_features_test = pd.read_csv(
"../input/epidemy/dengue_features_test.csv",
parse_dates=True,
infer_datetime_format=True,
)
dengue_labels_train = pd.read_csv(
"../input/epidemy/dengue_labels_train.csv",
parse_dates=True,
infer_datetime_format=True,
)
for col in dengue_features_train.columns:
print(col)
from datetime import date, timedelta
import datetime
import math
debug_date_map = True
# store debug information on how each (year, weekofyear) pair is mapped
date_map_debug_logs = []
set_of_bad_years = set()
# map from dataframe with year, weekofyear features
# to a time index
# lets go with weeks since day 1 of week 1 of 1990
def date_start_map(x):
year_one = 1990
week_one = 1
day_of_week = 1
# Trying thursday
date_one = datetime.datetime.strptime(
f"{year_one}-U{week_one}-{day_of_week}", "%Y-U%U-%w"
).date()
year_current = x["year"]
week_current = x["weekofyear"]
# trouble years are those where this data set incorrectly assigns a week at the
# beginning to the end. I've identified these as those starting with Friday, Saturday,
# or Sunday
raw_year_current = year_current
raw_week_current = week_current
trouble_year = False
shift = 0
year_start_date = datetime.datetime.strptime(
f"{year_current}-M{1}-{1}", "%Y-M%m-%d"
).date()
date_two = datetime.datetime.strptime(
f"{year_current}-U{week_current}-{day_of_week}", "%Y-U%U-%w"
).date()
if year_start_date.weekday() >= 4:
trouble_year = True
set_of_bad_years.add(year_current)
# need better check
if week_current >= 52:
year_current -= 1
# trying to assign to Dec 31
date_two = datetime.datetime.strptime(
f"{year_current}-M{12}-{31}", "%Y-M%m-%d"
).date()
else:
shift = 1
date_two = datetime.datetime.strptime(
f"{year_current}-U{week_current}-{day_of_week}", "%Y-U%U-%w"
).date()
time_d = date_two - date_one
weeks = math.ceil(time_d.days / 7.0)
weeks += shift
if debug_date_map:
debug_log = f"{raw_year_current}-{raw_week_current} mapped to week {weeks} using days {time_d.days}"
date_map_debug_logs.append(debug_log)
return weeks
startmap = date_start_map
dengue_features_train["weeks_since_start"] = dengue_features_train.apply(
startmap, axis=1
)
dengue_features_test["weeks_since_start"] = dengue_features_test.apply(startmap, axis=1)
dengue_labels_train["weeks_since_start"] = dengue_labels_train.apply(startmap, axis=1)
dengue_features_sj_train = pd.DataFrame(
dengue_features_train[dengue_labels_train["city"] == "sj"]
)
dengue_features_sj_test = pd.DataFrame(
    dengue_features_test[dengue_features_test["city"] == "sj"]
)
dengue_labels_sj_train = pd.DataFrame(
dengue_labels_train[dengue_labels_train["city"] == "sj"]
)
dengue_features_iq_train = pd.DataFrame(
dengue_features_train[dengue_labels_train["city"] == "iq"]
)
dengue_features_iq_test = pd.DataFrame(
    dengue_features_test[dengue_features_test["city"] == "iq"]
)
dengue_labels_iq_train = pd.DataFrame(
dengue_labels_train[dengue_labels_train["city"] == "iq"]
)
list_of_frames = [
dengue_features_sj_train,
dengue_features_sj_test,
dengue_labels_sj_train,
dengue_features_iq_train,
dengue_features_iq_test,
dengue_labels_iq_train,
]
for frame in list_of_frames:
frame.sort_values("weeks_since_start", inplace=True)
frame.set_index("weeks_since_start", inplace=True)
ax = dengue_labels_sj_train.loc[:, ["total_cases"]].plot(figsize=(15, 5))
dengue_labels_iq_train.loc[:, ["total_cases"]].plot(ax=ax)
ax.legend(["San Juan", "Iquitos"])
ax.set(xlabel="Week", ylabel="Cases per Week")
plt.savefig("cases_data.png")
start_week = 50
end_week = 500
features_to_plot = [
"reanalysis_relative_humidity_percent",
"reanalysis_precip_amt_kg_per_m2",
"reanalysis_dew_point_temp_k",
"reanalysis_specific_humidity_g_per_kg",
"reanalysis_tdtr_k",
"station_min_temp_c",
]
ax = dengue_labels_sj_train.loc[start_week:end_week, ["total_cases"]].plot(
figsize=(15, 5)
)
dengue_features_sj_train.loc[start_week:end_week, features_to_plot].plot(ax=ax)
plt.savefig("test_feature_cmp_sj.png")
from learntools.time_series.utils import plot_lags, make_lags, make_leads
from statsmodels.graphics.tsaplots import plot_pacf
plot_pacf(dengue_labels_sj_train["total_cases"], lags=14)
plt.savefig("cases_sj_pacf.png")
# # Variables
#
# Averaging Vegetation Index data
ndvi_features = ["ndvi_se", "ndvi_sw", "ndvi_ne", "ndvi_nw"]
dengue_features_sj_train_ndvi_mean = dengue_features_sj_train[ndvi_features].mean(
axis=1
)
dengue_features_sj_train["ndvi_mean"] = dengue_features_sj_train_ndvi_mean
precip_variable = "reanalysis_sat_precip_amt_mm"
ndvi_variable = "ndvi_mean"
rel_hum_variable = "reanalysis_relative_humidity_percent"
dew_point_variable = "reanalysis_dew_point_temp_k"
mean_temp_variable = "reanalysis_avg_temp_k"
variables_of_interest = [
precip_variable,
ndvi_variable,
rel_hum_variable,
dew_point_variable,
mean_temp_variable,
]
# for labelling
varnames_of_interest = ["precip", "ndvi", "rel_hum", "dew_pt", "mean_temp"]
vartitles_of_interest = [
"Precipitation",
"Vegetation Index",
"Relative Humidity",
"Dew Point",
"Mean Temp",
]
smoothed_features = []
for i in range(len(variables_of_interest)):
variable = variables_of_interest[i]
varname = varnames_of_interest[i]
vartitle = vartitles_of_interest[i]
variable_rolling = (
dengue_features_sj_train[variable].rolling(window=5, center=False).mean()
)
smoothed_features.append(variable_rolling)
ax = dengue_features_sj_train[variable].plot(label=vartitle)
variable_rolling.plot(ax=ax, label="%s (smoothed)" % (vartitle))
ax.legend()
# ax.set_ylabel(vartitle_of_interest[i])
plt.savefig("variable_%s_smoothed.png" % (varname))
ax.clear()
# # Section: Target and Feature Lags
y_sj = dengue_labels_sj_train["total_cases"]
target_lag_number = 12
y_sj_lags = make_lags(dengue_labels_sj_train["total_cases"], lags=target_lag_number)
# can use code from Time Series course
n_lags = 24
lags_list = []
lags_list_no_target = []
for i in range(len(variables_of_interest)):
var_lags = make_lags(
dengue_features_sj_train[variables_of_interest[i]],
lags=n_lags,
name=varnames_of_interest[i],
)
lags_list.append(var_lags)
lags_list_no_target.append(var_lags)
# add target lags
lags_list.append(y_sj_lags)
smoothed_lags_list = []
smoothed_lags_list_no_target = []
for i in range(len(variables_of_interest)):
var_lags = make_lags(
smoothed_features[i], lags=n_lags, name="%s_smooth" % (varnames_of_interest[i])
)
print(type(var_lags))
smoothed_lags_list.append(var_lags)
smoothed_lags_list_no_target.append(var_lags)
smoothed_lags_list.append(y_sj_lags)
print(type(lags_list[0]))
y_sj_deriv = dengue_labels_sj_train["total_cases"] - y_sj_lags["y_lag_1"]
y_sj_deriv_smooth_gauss_mean = y_sj_deriv.rolling(
window=7, center=False, win_type="gaussian"
).mean(std=3)
y_sj_deriv_smooth_const_mean = y_sj_deriv.rolling(window=7, center=False).mean()
START = 50
END = 500
ax = y_sj_deriv.loc[START:END].plot(figsize=(15, 5), color="black", alpha=0.3)
y_sj_deriv_smooth_gauss_mean.loc[START:END].plot(
ax=ax, label="Gaussian Smooth", color="red"
)
y_sj_deriv_smooth_const_mean.loc[START:END].plot(
ax=ax, label="Flat Window", color="purple"
)
ax.set(xlabel="Week", ylabel="Change in Cases per Week")
plt.savefig("deriv_smoothing.png")
# print(type(y_sj_deriv_smooth_gauss_mean))
# y_sj_deriv_smooth_gauss_mean_frame=pd.DataFrame(y_sj_deriv_smooth_gauss_mean.shift(1),columns=['target_deriv_smooth_gauss_mean'])
# print(y_sj_deriv_smooth_gauss_mean_frame)
# y_sj_deriv_smooth_const_mean_frame=pd.DataFrame(y_sj_deriv_smooth_const_mean.shift(1),columns=['target_deriv_smooth_const_mean'])
# print(y_sj_deriv_smooth_const_mean_frame)
y_sj_deriv_smooth_gauss_mean_frame = pd.DataFrame(
y_sj_deriv_smooth_gauss_mean, columns=["target_deriv_smooth_gauss_mean"]
)
# print(y_sj_deriv_smooth_gauss_mean_frame)
y_sj_deriv_smooth_const_mean_frame = pd.DataFrame(
y_sj_deriv_smooth_const_mean, columns=["target_deriv_smooth_const_mean"]
)
# print(y_sj_deriv_smooth_const_mean_frame)
print(y_sj_deriv_smooth_gauss_mean_frame.head())
# need to shift by one to make sure it doesn't include the current or future information
# Or, apply make lags on this.
target_deriv_smooth_gauss_lags = make_lags(
y_sj_deriv_smooth_gauss_mean_frame["target_deriv_smooth_gauss_mean"],
lags=n_lags,
name="target_deriv_smooth_gauss_mean",
)
target_deriv_smooth_const_lags = make_lags(
y_sj_deriv_smooth_const_mean_frame["target_deriv_smooth_const_mean"],
lags=n_lags,
name="target_deriv_smooth_const_mean",
)
print(type(target_deriv_smooth_const_lags))
print(type(target_deriv_smooth_gauss_lags))
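# A tiny sanity check (toy series, not the dengue data) of the leakage concern noted
# above: a lag-1 feature at index t only exposes the value from index t-1, so the
# lagged derivative features never see current or future information.
_toy = pd.Series([10, 12, 15, 11], index=[1, 2, 3, 4], name="toy_cases")
print(pd.concat({"toy_cases": _toy, "toy_lag_1": _toy.shift(1)}, axis=1))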
# Verifying that the shift works
# START=50
# END=150
# ax=y_sj_deriv.loc[START:END].plot(figsize=(15,5),color='black',alpha=0.3)
# y_sj_deriv_smooth_gauss_mean.loc[START:END].plot(ax=ax,label='Gaussian Smooth',color='red')
# y_sj_deriv_smooth_const_mean.loc[START:END].plot(ax=ax,label='Flat Window',color='purple')
# y_sj_deriv_smooth_const_mean_frame.loc[START:END].plot(ax=ax,label='Flat Window, shifted',color='blue')
# ax.legend()
# ax.set(xlabel="Week",ylabel="Change in Cases per Week")
# plt.savefig("deriv_smoothing.png")
# lags_list
# Adding the derivatives to the lag lists
# Might be confusing in nomenclature that I'm adding this smoothed feature to both
# the non-smoothed and smoothed lists
# target_deriv_smooth_gauss_lags
# target_deriv_smooth_const_lags
lags_list.append(target_deriv_smooth_const_lags)
# for lag_feature in target_deriv_smooth_const_lags:
# print(type(lag_feature))
# print(lag_feature)
# lags_list.append(lag_feature)
# smoothed_lags_list.append(lag_feature)
# lags_list.append(y_sj_deriv_smooth_const_mean_frame)
# lags_list.append(y_sj_deriv_smooth_gauss_mean_frame)
# smoothed_lags_list.append(y_sj_deriv_smooth_const_mean_frame)
# smoothed_lags_list.append(y_sj_deriv_smooth_gauss_mean_frame)
for lag_feature in lags_list:
print(type(lag_feature))
for col in lag_feature.columns:
print(col)
dengue_features_sj_train_lags = pd.concat(lags_list, axis=1)
dengue_features_no_target_sj_train_lags = pd.concat(lags_list_no_target, axis=1)
dengue_smoothed_features_sj_train_lags = pd.concat(smoothed_lags_list, axis=1)
dengue_smoothed_features_no_target_sj_train_lags = pd.concat(
smoothed_lags_list_no_target, axis=1
)
X_lags_no_target_for_mi = dengue_features_no_target_sj_train_lags.dropna()
X_lags_for_mi = dengue_features_sj_train_lags.dropna()
# print(X_lags_no_target_for_mi)
(y_lags_no_target_for_mi, X_lags_no_target_for_mi) = y_sj.align(
X_lags_no_target_for_mi, join="inner", axis=0
)
(y_lags_for_mi, X_lags_for_mi) = y_sj.align(X_lags_for_mi, join="inner", axis=0)
print(X_lags_no_target_for_mi.shape)
print(y_lags_no_target_for_mi.shape)
mi_scores = make_mi_scores(X_lags_no_target_for_mi, y_lags_no_target_for_mi, "auto")
mi_with_target_scores = make_mi_scores(X_lags_for_mi, y_lags_for_mi, "auto")
print("Mutual Information Scores without target info:")
print(mi_scores.head(24))
print("Mutual Information Scores, including target-derived features")
print(mi_with_target_scores.head(20))
from sklearn.model_selection import train_test_split
def make_multistep_target(ts, steps):
return pd.concat({f"y_step_{i + 1}": ts.shift(-i) for i in range(steps)}, axis=1)
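# A quick illustration (toy series only) of what make_multistep_target produces:
# each row holds the current value in y_step_1 plus the following steps-1 values,
# so y_step_3 is the value two weeks ahead of the row's index.
_toy_y = pd.Series([1, 2, 3, 4, 5], name="toy")
print(make_multistep_target(_toy_y, steps=3))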
def plot_multistep(y, every=1, ax=None, palette_kwargs=None):
palette_kwargs_ = dict(palette="husl", n_colors=6, desat=None)
# palette_kwargs_ = dict(palette='husl', n_colors=16, desat=None)
if palette_kwargs is not None:
palette_kwargs_.update(palette_kwargs)
palette = sns.color_palette(**palette_kwargs_)
if ax is None:
fig, ax = plt.subplots()
ax.set_prop_cycle(plt.cycler("color", palette))
for date, preds in y[::every].iterrows():
# preds.index = pd.period_range(start=date, periods=len(preds))
preds.index = range(date, date + len(preds))
preds.plot(ax=ax, label=f"Forecast from {date}")
return ax
target_forecast_steps = 20
y_sj_multistep = make_multistep_target(y_sj, steps=target_forecast_steps).dropna()
# # Defining Inputs and Models
# each model may require different X,y
# especially based on rows we remove
list_X = []
list_y = []
model_list = []
model_labels = []
model_titles = []
X_train_list = []
X_valid_list = []
y_train_list = []
y_valid_list = []
# # Defining Models
from sklearn.linear_model import Ridge, Lasso
from sklearn.linear_model import LinearRegression
from xgboost import XGBRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.multioutput import RegressorChain
from sklearn.metrics import mean_squared_error
print("Available columns:")
for col in dengue_features_sj_train_lags.columns:
print(col)
list_X = []
list_y = []
model_list = []
model_labels = []
model_titles = []
X_train_list = []
X_valid_list = []
y_train_list = []
y_valid_list = []
n_estimators = 30
n_jobs = 4
model_labels.append("RegChain1_Target_Only")
model_titles.append("Regressor Chain 1 Target Only")
model_1 = RegressorChain(XGBRegressor(n_estimators=n_estimators, n_jobs=n_jobs))
model_list.append(model_1)
features_for_model = [
"y_lag_1",
"y_lag_2",
"y_lag_3",
"y_lag_4",
"y_lag_5",
"y_lag_6",
"y_lag_7",
"y_lag_8",
"y_lag_9",
"y_lag_10",
]
local_X = dengue_features_sj_train_lags[features_for_model].dropna()
local_y = y_sj_multistep
list_X.append(local_X)
list_y.append(local_y)
model_labels.append("RegChain1_Dew")
model_titles.append("Regressor Chain 1 Dew")
model_1 = RegressorChain(XGBRegressor(n_estimators=n_estimators, n_jobs=n_jobs))
model_list.append(model_1)
# features_for_model=['y_lag_1','y_lag_2','y_lag_3','y_lag_4','y_lag_5',
# 'y_lag_6','y_lag_7','y_lag_8','y_lag_9','y_lag_10',
# 'dew_pt_lag_6','dew_pt_lag_7','dew_pt_lag_8','dew_pt_lag_9',
# 'dew_pt_lag_10','dew_pt_lag_11','dew_pt_lag_12'
# ]
features_for_model = [
"y_lag_1",
"y_lag_2",
"y_lag_3",
"y_lag_4",
"y_lag_5",
"y_lag_6",
"y_lag_7",
"y_lag_8",
"y_lag_9",
"y_lag_10",
"dew_pt_lag_1",
"dew_pt_lag_2",
"dew_pt_lag_3",
"dew_pt_lag_4",
"dew_pt_lag_5",
"dew_pt_lag_6",
"dew_pt_lag_7",
"dew_pt_lag_8",
"dew_pt_lag_9",
"dew_pt_lag_10",
"dew_pt_lag_11",
"dew_pt_lag_12",
]
local_X = dengue_features_sj_train_lags[features_for_model].dropna()
local_y = y_sj_multistep
list_X.append(local_X)
list_y.append(local_y)
model_labels.append("RegChain1_Dew_Deriv")
model_titles.append("Regressor Chain 1 Dew Deriv")
model_1 = RegressorChain(XGBRegressor(n_estimators=n_estimators, n_jobs=n_jobs))
model_list.append(model_1)
# features_for_model=['y_lag_1','y_lag_2','y_lag_3','y_lag_4','y_lag_5',
# 'y_lag_6','y_lag_7','y_lag_8','y_lag_9','y_lag_10',
# 'dew_pt_lag_6','dew_pt_lag_7','dew_pt_lag_8','dew_pt_lag_9',
# 'dew_pt_lag_10','dew_pt_lag_11','dew_pt_lag_12'
# ]
features_for_model = [
"y_lag_1",
"y_lag_2",
"y_lag_3",
"y_lag_4",
"y_lag_5",
"y_lag_6",
"y_lag_7",
"y_lag_8",
"y_lag_9",
"y_lag_10",
"target_deriv_smooth_const_mean_lag_1",
"target_deriv_smooth_const_mean_lag_2",
"target_deriv_smooth_const_mean_lag_3",
"target_deriv_smooth_const_mean_lag_4",
"target_deriv_smooth_const_mean_lag_5",
"dew_pt_lag_1",
"dew_pt_lag_2",
"dew_pt_lag_3",
"dew_pt_lag_4",
"dew_pt_lag_5",
"dew_pt_lag_6",
"dew_pt_lag_7",
"dew_pt_lag_8",
"dew_pt_lag_9",
"dew_pt_lag_10",
"dew_pt_lag_11",
"dew_pt_lag_12",
]
local_X = dengue_features_sj_train_lags[features_for_model].dropna()
local_y = y_sj_multistep
list_X.append(local_X)
list_y.append(local_y)
model_labels.append("RegChain1_Dew_Temp")
model_titles.append("Regressor Chain 1 Dew Temp")
model = RegressorChain(XGBRegressor(n_estimators=n_estimators, n_jobs=n_jobs))
model_list.append(model)
features_for_model = [
"y_lag_1",
"y_lag_2",
"y_lag_3",
"y_lag_4",
"y_lag_5",
"y_lag_6",
"y_lag_7",
"y_lag_8",
"y_lag_9",
"y_lag_10",
"dew_pt_lag_6",
"dew_pt_lag_7",
"dew_pt_lag_8",
"dew_pt_lag_9",
"dew_pt_lag_10",
"dew_pt_lag_11",
"dew_pt_lag_12",
"mean_temp_lag_6",
"mean_temp_lag_7",
"mean_temp_lag_8",
"mean_temp_lag_9",
"mean_temp_lag_10",
"mean_temp_lag_11",
]
local_X = dengue_features_sj_train_lags[features_for_model].dropna()
local_y = y_sj_multistep
list_X.append(local_X)
list_y.append(local_y)
model_labels.append("RegChain1_Dew_Precip")
model_titles.append("Regressor Chain 1 Dew Precip")
model = RegressorChain(XGBRegressor(n_estimators=n_estimators, n_jobs=n_jobs))
model_list.append(model)
features_for_model = [
"y_lag_1",
"y_lag_2",
"y_lag_3",
"y_lag_4",
"y_lag_5",
"y_lag_6",
"y_lag_7",
"y_lag_8",
"y_lag_9",
"y_lag_10",
"dew_pt_lag_1",
"dew_pt_lag_2",
"dew_pt_lag_3",
"dew_pt_lag_4",
"dew_pt_lag_5",
"dew_pt_lag_6",
"dew_pt_lag_7",
"dew_pt_lag_8",
"dew_pt_lag_9",
"dew_pt_lag_10",
"dew_pt_lag_11",
"dew_pt_lag_12",
"precip_lag_1",
"precip_lag_2",
"precip_lag_3",
"precip_lag_4",
"precip_lag_5",
"precip_lag_6",
"precip_lag_7",
"precip_lag_8",
"precip_lag_9",
"precip_lag_10",
"precip_lag_11",
"precip_lag_12",
]
local_X = dengue_features_sj_train_lags[features_for_model].dropna()
local_y = y_sj_multistep
list_X.append(local_X)
list_y.append(local_y)
model_labels.append("RegChain1_Dew_Temp_Hum")
model_titles.append("Regressor Chain 1 Dew Temp Hum")
model = RegressorChain(XGBRegressor(n_estimators=n_estimators, n_jobs=n_jobs))
model_list.append(model)
features_for_model = [
"y_lag_1",
"y_lag_2",
"y_lag_3",
"y_lag_4",
"y_lag_5",
"y_lag_6",
"y_lag_7",
"y_lag_8",
"y_lag_9",
"y_lag_10",
"dew_pt_lag_6",
"dew_pt_lag_7",
"dew_pt_lag_8",
"dew_pt_lag_9",
"dew_pt_lag_10",
"dew_pt_lag_11",
"dew_pt_lag_12",
"mean_temp_lag_6",
"mean_temp_lag_7",
"mean_temp_lag_8",
"mean_temp_lag_9",
"mean_temp_lag_10",
"mean_temp_lag_11",
"rel_hum_lag_6",
"rel_hum_lag_7",
"rel_hum_lag_8",
"rel_hum_lag_9",
"rel_hum_lag_10",
"rel_hum_lag_11",
]
local_X = dengue_features_sj_train_lags[features_for_model].dropna()
local_y = y_sj_multistep
list_X.append(local_X)
list_y.append(local_y)
model_labels.append("RegChain1_Dew_Temp_Hum_Precip")
model_titles.append("Regressor Chain 1 Dew Temp Hum Precip")
model = RegressorChain(XGBRegressor(n_estimators=n_estimators, n_jobs=n_jobs))
model_list.append(model)
features_for_model = [
"y_lag_1",
"y_lag_2",
"y_lag_3",
"y_lag_4",
"y_lag_5",
"y_lag_6",
"y_lag_7",
"y_lag_8",
"y_lag_9",
"y_lag_10",
"dew_pt_lag_6",
"dew_pt_lag_7",
"dew_pt_lag_8",
"dew_pt_lag_9",
"dew_pt_lag_10",
"dew_pt_lag_11",
"dew_pt_lag_12",
"mean_temp_lag_6",
"mean_temp_lag_7",
"mean_temp_lag_8",
"mean_temp_lag_9",
"mean_temp_lag_10",
"mean_temp_lag_11",
"rel_hum_lag_6",
"rel_hum_lag_7",
"rel_hum_lag_8",
"rel_hum_lag_9",
"rel_hum_lag_10",
"rel_hum_lag_11",
"precip_lag_2",
"precip_lag_3",
"precip_lag_4",
"precip_lag_5",
"precip_lag_6",
"precip_lag_7",
"precip_lag_8",
"precip_lag_9",
]
local_X = dengue_features_sj_train_lags[features_for_model].dropna()
local_y = y_sj_multistep
list_X.append(local_X)
list_y.append(local_y)
# Neural Network models
# need to make multiple outputs.
from tensorflow import keras
from tensorflow.keras.layers import Dense, BatchNormalization
from tensorflow.keras import Input, Model
from tensorflow.keras import layers
# NN Model 1
# temporarily resetting lists for testing
if False:
list_X = []
list_y = []
model_list = []
model_labels = []
model_titles = []
X_train_list = []
X_valid_list = []
y_train_list = []
y_valid_list = []
# nn_loss_choice='mean_squared_logarithmic_error'
nn_loss_choice = "mse"
# would some loss options emphasize the peaked outbreaks better?
# Should the different time step outputs have different losses?
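# A minimal sketch (not used for the fits below) of how per-output losses could be
# wired in Keras: give each output layer its own name and pass a dict of losses to
# compile(). The layer names and the particular losses here are illustrative
# assumptions, not choices taken from this notebook.
_ex_inputs = Input(shape=(4,), name="ex_input")
_ex_hidden = Dense(8, activation="relu")(_ex_inputs)
_ex_near = Dense(1, name="y_step_near")(_ex_hidden)  # e.g. a short-horizon output
_ex_far = Dense(1, name="y_step_far")(_ex_hidden)  # e.g. a long-horizon output
_ex_model = Model(inputs=_ex_inputs, outputs=[_ex_near, _ex_far])
_ex_model.compile(
    optimizer="adam",
    loss={"y_step_near": "mse", "y_step_far": "mean_squared_logarithmic_error"},
)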
model_labels.append("NN_1_Dew")
model_titles.append("Neural Network 1 Dew")
features_for_model = [
"y_lag_1",
"y_lag_2",
"y_lag_3",
"y_lag_4",
"y_lag_5",
"y_lag_6",
"y_lag_7",
"y_lag_8",
"y_lag_9",
"y_lag_10",
"dew_pt_lag_1",
"dew_pt_lag_2",
"dew_pt_lag_3",
"dew_pt_lag_4",
"dew_pt_lag_5",
"dew_pt_lag_6",
"dew_pt_lag_7",
"dew_pt_lag_8",
"dew_pt_lag_9",
"dew_pt_lag_10",
"dew_pt_lag_11",
"dew_pt_lag_12",
]
local_X = dengue_features_sj_train_lags[features_for_model].dropna()
local_y = y_sj_multistep
print(local_X.shape[1])
inputs = Input(shape=(local_X.shape[1],), name="input")
print(inputs)
x = BatchNormalization()(inputs)
x = Dense(20, activation="relu", name="layer2")(x)
x = BatchNormalization()(x)
x = Dense(20, activation="relu", name="layer3")(x)
x = Dense(20, activation="sigmoid", name="layer4")(x)
x = Dense(20, activation=None, name="layer5")(x)
# making a separate output layer for each output
# output_layers=[]
# for col in local_y.columns:
# output_layer=Dense(1,name=col)(x)
# output_layers.append(output_layer)
model = Model(inputs=inputs, outputs=x)
# model = Model(inputs=inputs,outputs=output_layers)
loss_dict = {}
for col in local_y.columns:
loss_dict[col] = nn_loss_choice
# model.compile(loss=loss_dict,optimizer='adam')
model.compile(loss=nn_loss_choice, optimizer="adam")
print(model)
model.summary()
keras.utils.plot_model(
model, "model_nn1.png", show_shapes=True, show_layer_activations=False
)
model_list.append(model)
list_X.append(local_X)
list_y.append(local_y)
# ====================================================================
model_labels.append("NN_2_Dew_Deriv")
model_titles.append("Neural Network 2 Dew Deriv")
features_for_model = [
"y_lag_1",
"y_lag_2",
"y_lag_3",
"y_lag_4",
"y_lag_5",
"y_lag_6",
"y_lag_7",
"y_lag_8",
"y_lag_9",
"y_lag_10",
"target_deriv_smooth_const_mean_lag_1",
"target_deriv_smooth_const_mean_lag_2",
"target_deriv_smooth_const_mean_lag_3",
"target_deriv_smooth_const_mean_lag_4",
"target_deriv_smooth_const_mean_lag_5",
"dew_pt_lag_1",
"dew_pt_lag_2",
"dew_pt_lag_3",
"dew_pt_lag_4",
"dew_pt_lag_5",
"dew_pt_lag_6",
"dew_pt_lag_7",
"dew_pt_lag_8",
"dew_pt_lag_9",
"dew_pt_lag_10",
"dew_pt_lag_11",
"dew_pt_lag_12",
]
local_X = dengue_features_sj_train_lags[features_for_model].dropna()
local_y = y_sj_multistep
inputs = Input(shape=(local_X.shape[1],), name="input")
x = BatchNormalization()(inputs)
x = Dense(20, activation="relu", name="layer2")(x)
x = BatchNormalization()(x)
x = Dense(20, activation="relu", name="layer3")(x)
x = Dense(20, activation="sigmoid", name="layer4")(x)
x = Dense(20, activation=None, name="layer5")(x)
model = Model(inputs=inputs, outputs=x)
model.compile(loss=nn_loss_choice, optimizer="adam")
print(model)
model.summary()
keras.utils.plot_model(
model,
f"model_nn_{model_labels[-1]}.png",
show_shapes=True,
show_layer_activations=False,
)
model_list.append(model)
list_X.append(local_X)
list_y.append(local_y)
# ====================================================================
model_labels.append("NN_3_Dew_Precip")
model_titles.append("Neural Network 3 Dew Precip")
features_for_model = [
"y_lag_1",
"y_lag_2",
"y_lag_3",
"y_lag_4",
"y_lag_5",
"y_lag_6",
"y_lag_7",
"y_lag_8",
"y_lag_9",
"y_lag_10",
"dew_pt_lag_1",
"dew_pt_lag_2",
"dew_pt_lag_3",
"dew_pt_lag_4",
"dew_pt_lag_5",
"dew_pt_lag_6",
"dew_pt_lag_7",
"dew_pt_lag_8",
"dew_pt_lag_9",
"dew_pt_lag_10",
"dew_pt_lag_11",
"dew_pt_lag_12",
"precip_lag_1",
"precip_lag_2",
"precip_lag_3",
"precip_lag_4",
"precip_lag_5",
"precip_lag_6",
"precip_lag_7",
"precip_lag_8",
"precip_lag_9",
"precip_lag_10",
"precip_lag_11",
"precip_lag_12",
]
local_X = dengue_features_sj_train_lags[features_for_model].dropna()
local_y = y_sj_multistep
print(local_X.shape[1])
inputs = Input(shape=(local_X.shape[1],), name="input")
print(inputs)
x = BatchNormalization()(inputs)
x = Dense(20, activation="relu", name="layer2")(x)
x = BatchNormalization()(x)
x = Dense(20, activation="relu", name="layer3")(x)
x = Dense(20, activation="sigmoid", name="layer4")(x)
x = Dense(20, activation=None, name="layer5")(x)
# making a separate output layer for each output
# output_layers=[]
# for col in local_y.columns:
# output_layer=Dense(1,name=col)(x)
# output_layers.append(output_layer)
model = Model(inputs=inputs, outputs=x)
# model = Model(inputs=inputs,outputs=output_layers)
loss_dict = {}
for col in local_y.columns:
loss_dict[col] = nn_loss_choice
# model.compile(loss=loss_dict,optimizer='adam')
model.compile(loss=nn_loss_choice, optimizer="adam")
print(model)
model.summary()
keras.utils.plot_model(
model, "model_nn3.png", show_shapes=True, show_layer_activations=False
)
model_list.append(model)
list_X.append(local_X)
list_y.append(local_y)
# 'rel_hum_lag_6','rel_hum_lag_7','rel_hum_lag_8',
# 'rel_hum_lag_9','rel_hum_lag_10','rel_hum_lag_11',
# ====================================================================
model_labels.append("NN_4_Dew_Precip_Hum")
model_titles.append("Neural Network 4 Dew Precip Humid")
features_for_model = [
"y_lag_1",
"y_lag_2",
"y_lag_3",
"y_lag_4",
"y_lag_5",
"y_lag_6",
"y_lag_7",
"y_lag_8",
"y_lag_9",
"y_lag_10",
"dew_pt_lag_1",
"dew_pt_lag_2",
"dew_pt_lag_3",
"dew_pt_lag_4",
"dew_pt_lag_5",
"dew_pt_lag_6",
"dew_pt_lag_7",
"dew_pt_lag_8",
"dew_pt_lag_9",
"dew_pt_lag_10",
"dew_pt_lag_11",
"dew_pt_lag_12",
"precip_lag_1",
"precip_lag_2",
"precip_lag_3",
"precip_lag_4",
"precip_lag_5",
"precip_lag_6",
"precip_lag_7",
"precip_lag_8",
"precip_lag_9",
"precip_lag_10",
"precip_lag_11",
"precip_lag_12",
"rel_hum_lag_1",
"rel_hum_lag_2",
"rel_hum_lag_3",
"rel_hum_lag_4",
"rel_hum_lag_5",
"rel_hum_lag_6",
"rel_hum_lag_7",
"rel_hum_lag_8",
"rel_hum_lag_9",
"rel_hum_lag_10",
"rel_hum_lag_11",
"rel_hum_lag_12",
]
local_X = dengue_features_sj_train_lags[features_for_model].dropna()
local_y = y_sj_multistep
print(local_X.shape[1])
inputs = Input(shape=(local_X.shape[1],), name="input")
print(inputs)
x = BatchNormalization()(inputs)
x = Dense(20, activation="relu", name="layer2")(x)
x = BatchNormalization()(x)
x = Dense(20, activation="relu", name="layer3")(x)
x = Dense(20, activation="sigmoid", name="layer4")(x)
x = Dense(20, activation=None, name="layer5")(x)
model = Model(inputs=inputs, outputs=x)
# model = Model(inputs=inputs,outputs=output_layers)
loss_dict = {}
for col in local_y.columns:
loss_dict[col] = nn_loss_choice
# model.compile(loss=loss_dict,optimizer='adam')
model.compile(loss=nn_loss_choice, optimizer="adam")
print(model)
model.summary()
keras.utils.plot_model(
model, "model_nn4.png", show_shapes=True, show_layer_activations=False
)
model_list.append(model)
list_X.append(local_X)
list_y.append(local_y)
# code from https://www.kaggle.com/code/ryanholbrook/dropout-and-batch-normalization
# layers.Dense(16, activation='relu'),
# layers.BatchNormalization(),
# model = keras.Sequential([
# layers.Dense(1024, activation='relu', input_shape=[11]),
# layers.Dropout(0.3),
# layers.BatchNormalization(),
# layers.Dense(1024, activation='relu'),
# layers.Dropout(0.3),
# layers.BatchNormalization(),
# layers.Dense(1024, activation='relu'),
# layers.Dropout(0.3),
# layers.BatchNormalization(),
# layers.Dense(1),
# ])
# code from https://www.kaggle.com/code/nicapotato/keras-timeseries-multi-step-multi-output
# code from https://stackoverflow.com/questions/44036971/multiple-outputs-in-keras
# inputs = Input(shape=(3,), name='input')
# x = Dense(16, activation='relu', name='16')(inputs)
# x = Dense(32, activation='relu', name='32')(x)
# output1 = Dense(1, name='cont_out')(x)
# output2 = Dense(3, activation='softmax', name='cat_out')(x)
# model = Model(inputs=inputs, outputs=[output1, output2])
# model.compile(loss={'cont_out': 'mean_absolute_error',
# 'cat_out': 'sparse_categorical_crossentropy'},
# optimizer='adam',
# metrics={'cat_out': tf.metrics.SparseCategoricalAccuracy(name='acc')})
# history = model.fit(X, {'cont_out': Y, 'cat_out': Z}, epochs=10, batch_size=8)
# Time Series Only Models
# # Split the data
# Also, we can choose to standardize the data here (so we can make sure to just get the mean and std from the training sets)
# Switch to standardize all inputs
enable_standardize_all = True
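# A hedged alternative to the manual standardization inside the loop below:
# sklearn's StandardScaler, fit on the training rows only and then applied to both
# splits. Shown on toy lists purely for illustration; the loop keeps the original
# pandas-based version.
from sklearn.preprocessing import StandardScaler
_toy_train = [[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]]
_toy_valid = [[4.0, 40.0]]
_toy_scaler = StandardScaler().fit(_toy_train)  # statistics come from the training split only
print(_toy_scaler.transform(_toy_valid))  # validation rows scaled with the training statistics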
test_size = 400
for i in range(len(list_X)):
local_X = list_X[i]
local_y = list_y[i]
local_y, local_X = local_y.align(local_X, join="inner", axis=0)
X_train, X_valid, y_train, y_valid = train_test_split(
local_X, local_y, test_size=test_size, shuffle=False
)
if enable_standardize_all:
X_valid = (X_valid - X_train.mean()) / X_train.std()
X_train = (X_train - X_train.mean()) / X_train.std()
X_train_list.append(X_train)
X_valid_list.append(X_valid)
y_train_list.append(y_train)
y_valid_list.append(y_valid)
# # Run the Fits
# For fitting methods that give histories
fit_histories = []
fit_history_labels = []
fit_history_indices = []
for i in range(len(model_list)):
model = model_list[i]
model_label = model_labels[i]
X_train = X_train_list[i]
y_train = y_train_list[i]
X_valid = X_valid_list[i]
y_valid = y_valid_list[i]
# can add choices based on tags in the model_label
# maybe add NN to the beginning
print(f"Fitting model {model_label}")
# print(y_train.head())
if model_label[0:2] == "NN":
print("I think this is a Neural Net model. Fitting accordingly.")
# FIXME increase epochs after debugging
# num_epochs=10
num_epochs = 800
batch_size = 10
# batch_size=50
history = model.fit(
X_train,
y_train,
epochs=num_epochs,
validation_data=(X_valid, y_valid),
batch_size=batch_size,
verbose=0,
)
# history = model.fit(X_train,y_train,epochs=num_epochs,batch_size=52,verbose=0)
# history = model.fit(X_train,y_train)
fit_histories.append(history)
fit_history_labels.append(model_label)
fit_history_indices.append(i)
else:
##history = model.fit(X, {'cont_out': Y, 'cat_out': Z}, epochs=10, batch_size=8)
model.fit(X_train, y_train)
# history = model.fit(X_train,y_train)
# fit_histories.append(history)
# fit_history_labels.append(model_label)
# fit_history_indices.append(i)
# print(type(history))
print(f" Finished fitting model {model_label}")
# history = model.fit(
# X_train, y_train,
# validation_data=(X_valid, y_valid),
# batch_size=256,
# epochs=100,
# verbose=0,
# )
# Show the learning curves
# history_df = pd.DataFrame(history.history)
# history_df.loc[:, ['loss', 'val_loss']].plot();
for i in range(len(fit_history_indices)):
history = fit_histories[i]
label = fit_history_labels[i]
fit_index = fit_history_indices[i] # global model index
print(f"Giving fitting history information for {label}")
print(type(history))
print(history)
history_df = pd.DataFrame(history.history)
print(history_df.columns)
# Start the plot at epoch 5
history_df.loc[5:, ["loss", "val_loss"]].plot()
plt.legend([f"Model {i+1} Training Loss", f"Model {i+1} Validation Loss"])
plt.savefig(f"model_{label}_fit_history.png")
# history_df.loc[5:, ['y_step_10_loss', 'val_y_step_10_loss']].plot()
# plt.legend(['Training Loss (Step 10)','Validation Loss (Step 10)'])
# plt.savefig(f'model_{label}_fit_history.png')
# # Produce Predictions
y_pred_train_list = []
y_pred_valid_list = []
for i in range(len(model_list)):
model = model_list[i]
model_label = model_labels[i]
X_train = X_train_list[i]
y_train = y_train_list[i]
X_valid = X_valid_list[i]
y_valid = y_valid_list[i]
print(f"Producing predictions for model {model_label}")
if model_label[0:2] == "NN":
print(
"I think this is a Neural Net model. Formatting input and output accordingly."
)
temp_predict_train = model.predict(X_train)
print(type(temp_predict_train[0]))
y_pred_train = pd.DataFrame(
temp_predict_train, index=y_train.index, columns=y_train.columns
)
print(f"y_pred_train df has shape {y_pred_train.shape}")
temp_predict_valid = model.predict(X_valid)
print(type(temp_predict_valid))
# if model_label != 'NN_1_Dew':
# temp_predict_valid=np.concatenate(temp_predict_valid,axis=1)
y_pred_valid = pd.DataFrame(
temp_predict_valid, index=y_valid.index, columns=y_valid.columns
)
else:
y_pred_train = pd.DataFrame(
model.predict(X_train),
index=y_train.index,
columns=y_train.columns,
).clip(0.0)
y_pred_valid = pd.DataFrame(
model.predict(X_valid),
index=y_valid.index,
columns=y_valid.columns,
).clip(0.0)
y_pred_train_list.append(y_pred_train)
y_pred_valid_list.append(y_pred_valid)
# # Run Metrics
import tensorflow_addons as tfa
rmse_train_list = []
rmse_valid_list = []
rsquare_list = []
# metric for NNs
r_square_metric = tfa.metrics.RSquare()
for i in range(len(model_list)):
model = model_list[i]
model_label = model_labels[i]
X_valid = X_valid_list[i]
y_train = y_train_list[i]
y_valid = y_valid_list[i]
y_pred_train = y_pred_train_list[i]
y_pred_valid = y_pred_valid_list[i]
print(f"y_train has type {type(y_train)}")
print(f"y_pred_train has type {type(y_pred_train)}")
print(f"y_train has shape {y_train.shape}")
print(f"y_pred_train has shape {y_pred_train.shape}")
train_rmse = mean_squared_error(y_train, y_pred_train, squared=False)
test_rmse = mean_squared_error(y_valid, y_pred_valid, squared=False)
if model_label[0:2] == "NN":
print(type(model))
# storing 3, an impossible value, for now
r_square = 3
# r_square = tfa.metrics.RSquare
# r_square=model.score(X_valid,y_valid)
else:
r_square = model.score(X_valid, y_valid)
rmse_train_list.append(train_rmse)
rmse_valid_list.append(test_rmse)
rsquare_list.append(r_square)
print("===========================================")
print(f"Model {model_label}")
print(f" RMSE(training,valid)=({train_rmse:.2f},{test_rmse:.2f})")
print(f" R^2(valid)=({r_square:.2f})")
# print('Model Two:')
# print((f"Train RMSE: {train_rmse:.2f}\n" f"Test RMSE: {test_rmse:.2f}"))
# print('')
# r_square=model_2.score(X_2_valid,y_2_valid)
# print(f"Model 2 Train R^2: {r_square:.2f}\n")
print(rmse_train_list)
print(rmse_valid_list)
print(rsquare_list)
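# Hedged follow-up to the placeholder value of 3 used above for the neural nets:
# an actual R^2 could be computed on the validation predictions with sklearn's
# r2_score (uniform average over the multistep outputs). Sketch only; the stored
# placeholder values are left untouched.
from sklearn.metrics import r2_score
for i in range(len(model_list)):
    if model_labels[i][0:2] == "NN":
        nn_r_square = r2_score(y_valid_list[i], y_pred_valid_list[i])
        print(f"Model {model_labels[i]}: R^2(valid, r2_score)={nn_r_square:.2f}")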
# # Plot Results
for i in range(len(model_list)):
y_pred_train = y_pred_train_list[i]
y_pred_valid = y_pred_valid_list[i]
model_name = model_labels[i]
model_title = model_titles[i]
EVERY = 20
START = 50
END = 550
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(15, 10))
line1 = y_sj.loc[START:END].plot(ax=ax1, label="Data", color="black")
x = plot_multistep(y_pred_train.loc[START:END], ax=ax1, every=EVERY)
ax1.set_ylabel("Weekly Cases")
ax1.legend(["Data (Training)", f"Model {i+1}:{model_title} Forecasts (training)"])
EVERY = 20
START = 600
END = 1100
# fig, ax = plt.subplots(1, 1, figsize=(11, 4))
line1 = y_sj.loc[START:END].plot(ax=ax2, label="Data", color="black")
x = plot_multistep(y_pred_valid.loc[START:END], ax=ax2, every=EVERY)
ax2.set_ylabel("Weekly Cases")
ax2.legend(["Data (Validation)", f"Model {i+1}:{model_title} Forecasts"])
plt.savefig(f"model_{i+1}_{model_name}.png")
|
# # Context
# The stock market is a pillar of the world economy and one of the best places to invest and grow our finances. I am interested in learning how different elements such as CPI, PPP, the Fed interest rate, Fed funds, and other stocks affect a stock's value. I used historical values along with random forest classification to predict stock movement in the next quarter. Of course, these values alone are not enough to accurately predict a stock's value (EPS, P/E ratio, and balance sheet analysis of a company and its stock will be my next project), but I try to at least understand which features (correlations) matter most for changes in stock value and to identify trends.
# **Why random forest**
# I want to predict whether a stock will go up or down using historical data, so this is a supervised binary classification problem, and random forest is one of the better algorithms for binary classification, especially with regard to over-fitting: when I test-ran the model, performance on the training data was very good but the out-of-sample predictions were not.
# # Data Dictionary
# The dataset has the following information:
# * cpi - Consumer price index (inflation)
# * ppp - Purchasing Power Index
# * Fed Interest - interest rate of borrowing
# * Fed Fund - Total value of the assets of all Federal Reserve Banks
# * Stock - monthly stock price of each company
# * Debt - total US debt
# * Productivity - nonfarm productivity (labor, manufacturing)
# * SP 500 - S&P 500 price-to-earnings ratio
# * volte - market volatility
# * real yield - interest rate adjusted for inflation, i.e. the real change in purchasing power
# * Volume - volume traded on different venues
# data source - https://www.cboe.com/us/equities/market_share/ , https://fred.stlouisfed.org/, https://fiscaldata.treasury.gov/datasets/monthly-statement-public-debt/summary-of-treasury-securities-outstanding, https://www.bls.gov/productivity/data.htm
# # 1. Data Clean
# Import the necessary libraries and briefly explain the use of each library
import pandas as pd
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
import warnings
import difflib
warnings.filterwarnings("ignore")
from dateutil.relativedelta import relativedelta
import featuretools as ft
import datetime as dt
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import seaborn as sns
# Featuretools for feature engineering
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.metrics import (
confusion_matrix,
accuracy_score,
ConfusionMatrixDisplay,
f1_score,
)
# Importing regression models and metrics used for making predictions
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from scipy.stats import randint as sp_randint
from warnings import simplefilter
# ignore all future warnings
simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter("ignore", FutureWarning)
path = os.getcwd()
excel_files = glob.glob(os.path.join(r"/kaggle/input/stock2011/stock/cpi", "*.xlsx"))
xls_files = glob.glob(os.path.join(r"/kaggle/input/stock2011/stock/cpi", "*.xls"))
cpi_month = {}
cpi_month2 = {}
excel_files = glob.glob(os.path.join(r"/kaggle/input/stock2011/stock/cpi", "*.xlsx"))
xls_files = glob.glob(os.path.join(r"/kaggle/input/stock2011/stock/cpi", "*.xls"))
volume_files = glob.glob(
os.path.join(r"/kaggle/input/stock2011/stock/market_volume", "*.csv")
)
cpi_month = {}
cpi_month2 = {}
prod = {}
volume = {}
for f in excel_files:
cpi_month[f.split("/")[-1].split(".")[0]] = pd.read_excel(f)
for f in xls_files:
cpi_month2[f.split("/")[-1].split(".")[0]] = pd.read_excel(f)
for f in cpi_month:
cpi_month[f].columns = cpi_month[f].iloc[10]
cpi_month[f] = cpi_month[f].drop(cpi_month[f].index[0:11])
cpi_month[f] = cpi_month[f].drop(["HALF1", "HALF2", "Year"], axis=1)
if "Annual" in cpi_month[f].columns:
cpi_month[f] = cpi_month[f].drop(["Annual"], axis=1)
for f in volume_files:
volume[f.split("/")[-1].split(".")[0]] = pd.read_csv(f)
date = pd.date_range(start="1/1/2012", end="12/1/2021", freq="MS")
cpi_allitem = {}
for f in cpi_month:
k = pd.Series()
for i in range(0, len(cpi_month[f]) - 1):
k = pd.concat([k, cpi_month[f].iloc[i, :]], ignore_index=True)
df = pd.DataFrame(data=k)
df.index = date
df = df.rename(columns={0: "cpi"})
cpi_allitem[f] = df
final_cpi = pd.DataFrame(data=date, columns=["Date"], index=date)
for i in cpi_month2:
cpi_month2[i] = cpi_month2[i].iloc[11:, :]
k = cpi_month2[i]
k.columns = ["Date", i]
cpi_month2[i].index = date
final_cpi = pd.concat([final_cpi, cpi_month2[i].iloc[:, 1]], axis=1)
# # Above is all the CPI and Fed data. Below is the cleaning of the volume and GDP data
volume_df = pd.DataFrame()
for i in volume:
temp_df = pd.DataFrame(data=volume[i])
temp_df = temp_df[
[
"Month",
"Market Participant",
"Total Shares",
"Total Notional",
"Total Trade Count",
]
]
for k in range(0, len(temp_df)):
new_temp = pd.DataFrame(temp_df.iloc[k, 2:]).T
# new_temp = new_temp.drop('Market Participant', axis=1)
new_temp.columns = [
temp_df["Market Participant"][k] + "Total Share",
temp_df["Market Participant"][k] + "Total Notionale",
temp_df["Market Participant"][k] + "Total Trade Count",
]
new_temp.index = [temp_df["Month"][k]]
volume_df = pd.concat([volume_df, new_temp], axis=0)
volumefinal_df = pd.DataFrame()
for i in volume_df.columns:
volume_df2 = volume_df[i].dropna()
volumefinal_df = pd.concat([volumefinal_df, volume_df2], axis=1)
volumefinal_df = volumefinal_df.dropna(axis=1)
volumefinal_df.index = date
final_cpi = pd.concat([final_cpi, volumefinal_df], axis=1)
print(volumefinal_df)
debt = pd.read_csv(r"/kaggle/input/stock2011/stock/debt/debt.csv")
debt = debt[debt["Security Type Description"] == "Total Public Debt Outstanding"]
debt.index = debt["Record Date"]
debt = debt.sort_index()
debt = debt.iloc[1:-1, :]
debt.index = date
debt = debt["Total Public Debt Outstanding (in Millions)"]
debt.columns = ["Debt"]
debt = debt.astype(int)
final_cpi = pd.concat([final_cpi, debt], axis=1)
pe = pd.read_csv(
r"/kaggle/input/stock2011/stock/sp500 pe/MULTPL-SP500_PE_RATIO_MONTH (1).csv"
)
pe.columns = ["Date", "SP500PE"]
pe["Date"] = pe["Date"].apply(lambda x: dt.datetime.strptime(x, "%Y-%m-%d"))
pe.index = pe["Date"]
pe = pe.sort_index()
pe.index = date
pe = pe.drop("Date", axis=1)
pe.columns = ["SP500PE"]
final_cpi = pd.concat([final_cpi, pe], axis=1)
print(final_cpi)
yield_bond = pd.read_excel(
r"/kaggle/input/stock2011/stock/yield after inflation/DFII10.xls"
)
yield_bond = yield_bond.iloc[11:, :]
yield_bond.index = date
yield_bond = yield_bond.drop("FRED Graph Observations", axis=1)
yield_bond.columns = ["yield"]
final_cpi = pd.concat([final_cpi, yield_bond], axis=1)
gdp = pd.read_excel(r"/kaggle/input/stock2011/stock/gdp_monthly.xlsx")
gdp.columns = ["date", "gdp"]
xls_files = glob.glob(os.path.join(r"/kaggle/input/stock2011/stock", "*.xls"))
pce_group = {}
for f in xls_files:
pce_group[f.split("/")[-1].split(".")[0]] = pd.read_excel(f)
for f in pce_group:
pce_group[f] = pce_group[f].drop(pce_group[f].index[0:9])
pce_group[f].columns = ["Date", "pce"]
pce_group[f] = pce_group[f].drop(pce_group[f].index[0])
gdp["gdp"] = gdp["gdp"].astype(str)
gdp["gdp"] = gdp["gdp"].str.replace("[T.]", "")
gdp["gdp"] = gdp["gdp"].str.replace("1631954", "16319")
gdp["gdp"] = gdp["gdp"].astype(int)
gdp["gdp"] = gdp["gdp"] * 100  # vectorized: scale every month's gdp value by 100
for i in pce_group:
pce_group[i] = pce_group[i].reset_index(drop=True)
pce_group.pop("GDP", None)
for i in pce_group:
k = pd.DataFrame(pce_group[i])
k.columns = ["Date", i]
k.index = date
final_cpi = pd.concat([final_cpi, pce_group[i].iloc[:, 1]], axis=1)
gdp = gdp.sort_values("date")
gdp.index = date
final_cpi = pd.concat([final_cpi, gdp.iloc[:, 1]], axis=1)
print(gdp)
final_cpi["gdp"] = final_cpi["gdp"].replace(np.nan, 24520)
final_cpi.index = date
date2 = final_cpi["Date"]
final_df = final_cpi
final_cpi = final_cpi.drop("Date", axis=1)
for col in final_cpi.columns:
final_cpi[col] = final_cpi[col].astype(float)
final_df[col] = final_df[col].astype(float)
for col in final_cpi.columns:
print(col)
print("Skew :", round(final_cpi[col].skew(), 2))
plt.figure(figsize=(15, 4))
plt.subplot(1, 2, 1)
final_cpi[col].hist(bins=10, grid=False)
plt.ylabel("count")
plt.subplot(1, 2, 2)
sns.boxplot(x=final_cpi[col])
plt.show()
plt.figure(figsize=(8, 8))
sns.heatmap(final_cpi.corr())
plt.show()
# As we can see from the data, most of the distributions skew to the right, which makes sense as the economy is growing as a whole. From the correlation graph, we can also see that purchasing power, the Fed funds, and apparel CPI have almost no correlation with the other data, which means we can consider removing them. Besides that, most of the other series are strongly correlated with one another. In this case, we will still keep all the data for now, as I want to see the correlations once the stock values are added in.
# From the correlation graph, we can also see that the volume series actually do not have much relationship with CPI, PCE, debt, and yield. However, CPI, PCE, debt, and yield are highly correlated with each other. GDP is only correlated with purchasing power.
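# A small sketch of the screening idea above: list the columns whose strongest
# absolute correlation with any other column stays below a threshold (0.3 here is an
# illustrative cutoff, not a tuned value). Nothing is dropped at this point.
_corr_abs = final_cpi.corr().abs()
_max_offdiag = _corr_abs.mask(np.eye(len(_corr_abs), dtype=bool)).max()  # ignore self-correlation
_weak_cols = _max_offdiag[_max_offdiag < 0.3].index.tolist()
print("Weakly correlated columns (candidates to drop):", _weak_cols)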
# # Stock Data Clean
# I have collected the average monthly stock price of different companies, and I will combine them with my data above for a random forest classification.
#
csv_files = glob.glob(
os.path.join(r"/kaggle/input/stock2011/stock/stock_month_new", "*.csv")
)
excel_files = glob.glob(
os.path.join(r"/kaggle/input/stock2011/stock/stock_month_new", "*.xlsx")
)
stock_month = {}
for f in csv_files:
stock_month[f.split("/")[-1].split(".")[0]] = pd.read_csv(f)
for f in excel_files:
stock_month[f.split("/")[-1].split(".")[0]] = pd.read_excel(f)
for i in stock_month:
stock_month[i]["Date"] = pd.to_datetime(stock_month[i]["Date"])
stock_month["crudeoil_month"] = stock_month["crudeoil_month"].iloc[:105, :]
# correct date to day 1
for i in stock_month:
date = stock_month[i]["Date"]
stock_month[i]["Date"] = stock_month[i]["Date"].to_numpy().astype("datetime64[M]")
for j in range(1, len(date)):
if abs(stock_month[i]["Date"][j] - stock_month[i]["Date"][j - 1]).days > 31:
stock_month[i]["Date"][j] = stock_month[i]["Date"][j] + relativedelta(
months=1
)
stock_month[i]["Date"] = stock_month[i]["Date"].to_numpy().astype("datetime64[M]")
for i in stock_month:
k = stock_month[i].columns
j = difflib.get_close_matches("close", k)
df = pd.DataFrame(data=stock_month[i][j])
df.columns = [i]
stock_month[i]["Date"] = stock_month[i]["Date"].drop_duplicates()
df.index = stock_month[i]["Date"]
final_df = pd.concat([final_df, df], axis=1)
for i in range(0, len(final_df["crudeoil_month"]) - 1):
if pd.isnull(final_df["crudeoil_month"].iloc[i]):
final_df["crudeoil_month"].iloc[i] = (
final_df["crudeoil_month"].iloc[i - 1]
+ final_df["crudeoil_month"].iloc[i + 1]
) / 2
final_df = final_df.iloc[2:-1, :]
final_df = final_df.reset_index()
month = final_df["Date"].apply(lambda x: x.month)
final_df["ID"] = np.nan
final_df["Quater"] = np.nan
final_df.insert(1, "Month", month)
quater = 0
for i in range(0, len(final_df)):
if 0 < final_df["Month"][i] <= 3:
quater = 1
elif 3 < final_df["Month"][i] <= 6:
quater = 2
elif 6 < final_df["Month"][i] <= 9:
quater = 3
elif 9 < final_df["Month"][i] <= 12:
quater = 4
final_df["Quater"][i] = quater
final_df["ID"][i] = (
str(final_df["Date"][i].year)
+ "-"
+ str(final_df["Date"][i].month)
+ "Q"
+ str(quater)
)
cols = final_df.columns.tolist()
cols = cols[-2:-1] + cols[0:2] + cols[-1:] + cols[2:-2]
final_df = final_df[cols]
k = 1
final_df = final_df.drop(["index"], axis=1)
for i in range(4, len(final_df.columns)):
i = i + k
k = k + 2
final_df.insert(i, str(final_df.columns[i - 1] + "change"), np.nan)
for q in range(1, len(final_df)):
final_df[final_df.columns[i]][q] = (
final_df[final_df.columns[i - 1]][q]
- final_df[final_df.columns[i - 1]][q - 1]
)
final_df.insert(i + 1, str(final_df.columns[i] + "UpDown"), np.nan)
for j in range(1, len(final_df)):
if final_df[final_df.columns[i]][j] > 0:
final_df[final_df.columns[i + 1]][j] = 1
else:
final_df[final_df.columns[i + 1]][j] = 0
final_df = final_df.iloc[1:, ::]
final_df2 = final_df
final_df3 = final_df
final_df = final_df.drop("Date", axis=1)
final_df = final_df.drop("ID", axis=1)
# # Random Forest Classifier
# Below I choose one random stock. For example, amazon's stock to check performance of random forest.
from sklearn.model_selection import train_test_split
# Splitting the data into train and test
X = final_df.drop("amazon_monthchangeUpDown", axis=1)
# Putting response variable to y
Y = final_df["amazon_monthchangeUpDown"]
X_train, X_test, y_train, y_test = train_test_split(
X, Y, train_size=0.7, random_state=42
)
classifier_rf = RandomForestClassifier(
random_state=42, n_jobs=-1, max_depth=5, n_estimators=100, oob_score=True, verbose=2
)
classifier_rf.fit(X_train, y_train)
# checking the oob score
print(classifier_rf.oob_score_)
# # The below shows how I get the feature importances
da = RandomForestClassifier(
max_depth=3, min_samples_leaf=5, n_estimators=600, n_jobs=-1, random_state=42
)
real_df = da.fit(X_train, y_train)
print("da = ", real_df)
print(da.get_params())
imp_df = pd.DataFrame({"Varname": X_train.columns, "Imp": da.feature_importances_})
imp_df = imp_df.sort_values(by="Imp", ascending=False)
print(imp_df)
dada = da.predict(X_test)
print(da.score(X_test, y_test))
# Now that I have tested how to get the model score and the feature importances, I want to further clean up my data for better performance. We want to focus only on the change in the market, since the raw values do not mean much compared with the change and the rate of change. So I will remove the raw values and **leave only the change in the same month and the next month, since the end goal of this project is to predict the change in the next month/quarter** (see the sketch below).
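# A minimal sketch (toy prices only) of the labelling idea described above: keep the
# month-over-month change, a same-period up/down flag, and a next-period up/down flag
# built with shift(-1) so that it refers to the following month.
_toy_price = pd.Series([100.0, 103.0, 101.0, 106.0], name="toy_stock")
_toy_change = _toy_price.diff()
_toy_updown = (_toy_change > 0).astype(int)
print(pd.concat(
    {"price": _toy_price, "change": _toy_change,
     "UpDown": _toy_updown, "NextUpDown": _toy_updown.shift(-1)},
    axis=1,
))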
final_df2 = final_df2.drop(["ID", "Month", "Date", "Quater"], axis=1)
new_threem_df = pd.DataFrame()
last_df = pd.DataFrame()
for i in final_df2.columns:
quater = 1
new_threem_df = pd.DataFrame()
j = 1
o = 0
p = 3
for k in range(0, int(len(final_df2) / 3)):
if quater == 5:
quater = 1
yk = pd.DataFrame(
data=[final_df2[i].iloc[o:p]],
index=[str(final_df3["Date"][j].year) + "Quater" + str(quater)],
)
yk.columns = [i + "1", i + "2", i + "3"]
new_threem_df = pd.concat([new_threem_df, yk], ignore_index=False)
quater = quater + 1
p = p + 3
o = o + 3
j = j + 3
last_df = pd.concat([last_df, new_threem_df], axis=1)
reindex = last_df.index.tolist()
reindex = reindex[1:] + ["2021Quater4"]
last_df.index = reindex
list_df = []
j = 2
for i in range(3, int(len(last_df.columns) / 3) + 1):
list_df = []
name = last_df.columns[j - 2]
# don't count the change/UpDown columns
name = name[-7:-1]
if any([x in name for x in ["change", "UpDown"]]):
j = j + 3
continue
else:
for k in range(0, len(last_df)):
if last_df.iloc[k, j] - last_df.iloc[k, j - 2] > 0:
list_df.append(1)
else:
list_df.append(0)
list2 = list_df
list2 = list2[1:] + [np.nan]
last_df.insert(j + 1, last_df.columns[j - 2] + "quaterUpDown", list_df)
last_df.insert(j + 2, last_df.columns[j - 2] + "NextQuaterUpDown", list2)
j = j + 5
clean_last = pd.DataFrame()
j = 3
for i in range(0, int(len(last_df.columns) / 3)):
clean_last = pd.concat([clean_last, last_df.iloc[:, j : j + 8]], axis=1)
j = j + 11
# drop next quater up down in xf
# remember to change param for final run
next_df = clean_last
clean_last.drop(last_df.tail(1).index, inplace=True)
all_bestrf = {}
accuracy = {}
count = 0
# using the f1 score, since most of the time stocks should be growing along with GDP, so the up/down classes are imbalanced
for i in clean_last.columns:
list_df = []
name = i
name1 = i[-16::]
count = 0
if any([x in name1 for x in ["NextQuaterUpDown"]]):
xf_unclean = clean_last
for k in xf_unclean.columns:
if k[-16::] in ["NextQuaterUpDown"]:
if k == name:
continue
else:
xf_unclean = xf_unclean.drop(k, axis=1)
Xf = xf_unclean.drop(name, axis=1)
Yf = xf_unclean[name]
Xf_train, Xf_test, Yf_train, Yf_test = train_test_split(
Xf, Yf, train_size=0.6, random_state=1
)
oo = 1
while oo == 1:
# First create the base model to tune
rf = RandomForestClassifier()
n_estimators = sp_randint(10, 100)
max_features = ["log2", "sqrt", None]
max_depth = sp_randint(5, 20)
min_samples_split = sp_randint(100, 1000)
min_samples_leaf = sp_randint(2, 20)
bootstrap = [True, False]
# Create the random grid
random_grid = {
"n_estimators": n_estimators.rvs(10),
#'max_features': max_features,
"max_depth": max_depth.rvs(10),
#'min_samples_split': min_samples_split.rvs(10),
#'min_samples_leaf': min_samples_leaf,
"bootstrap": bootstrap,
"criterion": ["gini", "entropy"],
"max_leaf_nodes": sp_randint(5, 100),
}
rf_random = RandomizedSearchCV(
estimator=rf,
param_distributions=random_grid,
n_iter=20,
cv=3,
n_jobs=-1,
)
# Instantiate the grid search model
# Create a random forest classifier
rf_random.fit(Xf_train, Yf_train)
best_rf = rf_random.best_estimator_
predict_ff = best_rf.predict(Xf_test)
predict_ftrain = best_rf.predict(Xf_train)
print("Testing Score :", f1_score(Yf_test, predict_ff))
print("Training score:", f1_score(Yf_train, predict_ftrain))
print("predict test =", predict_ff)
print("Real test=", Yf_test.array)
# the accuracy threshold for the model can be adjusted, but on Kaggle, with limited computational power, I keep it low
if (
f1_score(Yf_test, predict_ff) < 0.5
or f1_score(Yf_train, predict_ftrain) < 0.5
):
if count < 30:
if f1_score(Yf_test, predict_ff) == 0:
accuracy[name] = float(1)
all_bestrf[name] = best_rf
imp_df = pd.DataFrame(
{
"Varname": Xf_train.columns,
"Imp": best_rf.feature_importances_,
}
)
imp_df = imp_df.sort_values(by="Imp", ascending=False)
print(imp_df)
oo = 0
else:
count = count + 1
continue
else:
accuracy[name] = f1_score(Yf_test, predict_ff)
all_bestrf[name] = best_rf
imp_df = pd.DataFrame(
{
"Varname": Xf_train.columns,
"Imp": best_rf.feature_importances_,
}
)
imp_df = imp_df.sort_values(by="Imp", ascending=False)
print(imp_df)
oo = 0
else:
oo = 0
accuracy[name] = f1_score(Yf_test, predict_ff)
# cm = confusion_matrix(Yf_test, predict_ff)
# ConfusionMatrixDisplay(confusion_matrix=cm).plot()
# plt.show()
imp_df = pd.DataFrame(
{"Varname": Xf_train.columns, "Imp": best_rf.feature_importances_}
)
imp_df = imp_df.sort_values(by="Imp", ascending=False)
print(imp_df)
all_bestrf[name] = best_rf
print("Done:", i)
print(all_bestrf)
|
import pandas as pd
a = pd.read_csv(
"/kaggle/input/bigbasket-entire-product-list-28k-datapoints/BigBasket Products.csv"
)
a
b = pd.read_csv(
"/kaggle/input/bigbasket-entire-product-list-28k-datapoints/BigBasket Products.csv",
usecols=["product", "category", "sub_category", "rating"],
)
b.head()
b.info()
# **Assigning Multi Level Index**
a.set_index("category") # single index
a.head(20)
a.set_index(["category", "sub_category"]) # multi level index
# it automatically clubs the data
# inplace=True should be passed for permanent update
# **We can have n number of index levels based on the requirements**
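# For example (assuming the "brand" column is present in this CSV), a third level can
# be stacked the same way; shown as a throwaway expression, nothing is stored.
a.set_index(["category", "sub_category", "brand"]).head()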
a.reset_index(inplace=True)
a
# **Index Details**
b = pd.read_csv(
"/kaggle/input/bigbasket-entire-product-list-28k-datapoints/BigBasket Products.csv",
usecols=["product", "category", "sub_category", "rating"],
index_col=["category", "sub_category"],
)
b.head()
# **Fetch Index**
b.index
b.index[0]
b.index.names
# **Fetch the Index using get level_values**
b.index.get_level_values(0)  # it will return the 0th level index values
b.index.get_level_values("sub_category")  # can be fetched using the name of the index level too
|
# # Imports
# * Import all modules used in this notebook
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout, Flatten, Activation, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
# # Fetch data from CSV files
# * Provided with test.csv and train.csv
# * Read data in using Pandas
# * Inspect the data sets
#
test_data = pd.read_csv("../input/digit-recognizer/test.csv")
train_data = pd.read_csv("../input/digit-recognizer/train.csv")
test_data.head()
train_data.head()
# # Preprocess Data
# * Split train data into X_train and y_train.
# * X_train will consist of all columns except 'label' column
# * y_train will consist of only the 'label' column
# * The given test data does not have a 'label' column, so no need for preprocessing
X_train = train_data.drop(labels=["label"], axis=1)
y_train = train_data["label"]
X_test = test_data
# # Normalize Data
# * The given images contain pixels with values in the range of 0-255
# * We want to normalize these values into a value between 0.0 and 1.0 in order to make calculations easier and allow our model to learn at a faster rate
X_train = X_train / 255.0
X_test = X_test / 255.0
# # Reshape Data
# * The data provided is formatted as 784 pixels, but we want to reshape our data to represent 28x28 pixel images
print("Original X_train: ", X_train.shape)
print("Original X_test: ", X_test.shape)
X_train = X_train.values.reshape(-1, 28, 28, 1)
X_test = X_test.values.reshape(-1, 28, 28, 1)
print("Reshaped X_train: ", X_train.shape)
print("Reshaped X_test: ", X_test.shape)
# # One-hot-encode
# * Since we want to classify our data into ten classes (for digits 0-9) we can one hot encode our y_train with 10 values.
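# A one-line sanity check of the encoding (toy label only): the digit 3 becomes a
# length-10 vector with a single 1 at index 3.
print(to_categorical([3], num_classes=10))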
y_train = to_categorical(y_train, num_classes=10)
# # Create Validation Set
# * From our training data, we will extract a validation set to use when training our model
# * We will use a 90%-10% training-validation split
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.1, random_state=144
)
# # Create Model
# * Initialize a convolutional neural network
# * Utilize dropout layers in order to deal with the model overfitting
# * Compile the model using the Adam optimizer
model = Sequential()
model.add(
Conv2D(
filters=64,
kernel_size=(5, 5),
padding="same",
activation="relu",
input_shape=(28, 28, 1),
)
)
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(filters=128, kernel_size=(5, 5), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.4))
model.add(Dense(10, activation="softmax"))
model.summary()
model.compile(loss="categorical_crossentropy", optimizer=Adam(), metrics=["accuracy"])
# # Data Augmentation
# * Generate augmented data from our training data using the ImageDataGenerator
# * We don't want generated data that is vertically or horizontally flipped, because of digits like 9 and 6
datagen = ImageDataGenerator(
rotation_range=10, horizontal_flip=False, vertical_flip=False
)
datagen.fit(X_train)
# # Train our Model
# * Fit the model over 30 epochs
# * Found that a batch size of 64 worked well
# * We use ReduceLROnPlateau in order to halve the learning rate when the validation accuracy flattens out over 3 epochs, helping our model better locate a minimum during optimization
epochs = 30
batch_len = 64
learning_rate_reduction = ReduceLROnPlateau(
monitor="val_accuracy", patience=3, verbose=1, factor=0.5, min_lr=0.00001
)
history = model.fit(
datagen.flow(X_train, y_train, batch_size=batch_len),
steps_per_epoch=len(X_train) // batch_len,
epochs=epochs,
validation_data=(X_val, y_val),
callbacks=[learning_rate_reduction],
)
# # Plot Loss and Accuracy
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model Loss")
plt.ylabel("Loss")
plt.xlabel("Epochs")
plt.legend(["train", "val"])
plt.show()
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model Accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epochs")
plt.legend(["train", "val"])
plt.show()
# # Predict Test Data
# * Using our model, we can predict which digit each of the images given in our test set is
# * Store our model's predictions in a csv file
res = model.predict(X_test)
res = np.argmax(res, axis=1)
res = pd.Series(res, name="Label")
result = pd.concat([pd.Series(range(1, 28001), name="ImageId"), res], axis=1)
result.to_csv("result.csv", index=False)
|
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
dataset = pd.read_csv("/kaggle/input/salary-data/Salary_Data.csv")
dataset
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values  # Salary is the last column
print("Year experience")
print(x)
print("Salary")
print(y)
plt.plot(x, y, color="red")
plt.title("Salary vs Experience(Raw dataset)")
plt.xlabel("Years of experience")
plt.ylabel("Salary")
plt.show()
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=1 / 4, random_state=0
)
lin_regressor = LinearRegression()
lin_regressor.fit(x_train, y_train)
y_pred = lin_regressor.predict(x_test)
y_pred
plt.scatter(x_train, y_train, color="green")
plt.plot(x_train, lin_regressor.predict(x_train), color="red")
plt.title("Salary vs Experience(Train dataset)")
plt.xlabel("Years of experience")
plt.ylabel("Salary")
plt.show()
plt.scatter(x_test, y_test, color="green")
plt.plot(x_test, lin_regressor.predict(x_test), color="red")
plt.title("Salary vs Experience(Test dataset)")
plt.xlabel("Years of experience")
plt.ylabel("Salary")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
import shutil
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
source_path = os.path.join(dirname, filename)
print(source_path)
shutil.copyfile(source_path, filename)
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# install and import alpha_vantage
import subprocess
import sys

try:
    from alpha_vantage.timeseries import TimeSeries
except ImportError:
    print("alpha_vantage is not installed, installing now...")
    subprocess.check_call([sys.executable, "-m", "pip", "install", "alpha_vantage"])
    from alpha_vantage.timeseries import TimeSeries
import requests
import time
import io
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
secret_value_0 = user_secrets.get_secret("alpha_key1")
secret_value_1 = user_secrets.get_secret("alpha_key2")
# All constant values used in this script
ALPHA_ACCESS = secret_value_1
ENDPOINT = "https://www.alphavantage.co/query"
ALPHA_MIN_LIMIT = 4
ALPHA_DAILY_LIMIT = 490
PROGRESS = "download_progress.csv"
METADATA = "meta.csv"
SLEEPTIMER = 60
def get_data_from_alpha_vantage(function, symbol, access_key):
"""
Get JSON data from Alpha Vantage.
Parameters:
- function (string) : time series of the data you want to enquire e.g. 'TIME_SERIES_WEEKLY_ADJUSTED', 'TIME_SERIES_DAILY'
- symbol (string) : symbol of the equity you want to enquire e.g. 'IBM'
- access_key (string) : access key to Alpha Vantage
Return:
alpha_data_dict (dictionary): JSON data from Alpha Vantage as a dictionary
"""
# set params for alpha vantage API
params = {
"function": function,
"symbol": symbol, # Replace with the symbol of the stock you're interested in
"outputsize": "full",
"datatype": "csv",
"apikey": access_key, # Replace with your Alpha Vantage API key
}
# if per minute limit has been reached, update download_progress.csv and sleep for SLEEPTIMER seconds
if daily_counter % ALPHA_MIN_LIMIT == 0:
update_progress_csv(download_progress_df) # update download_progress.csv
time.sleep(SLEEPTIMER) # sleep for SLEEPTIMER seconds
# get json from Alpha Vantage
try:
response = requests.get(ENDPOINT, params=params)
print(response)
data = response.content.decode("utf-8")
alpha_data = pd.read_csv(io.StringIO(data))
alpha_data.to_csv(symbol + ".csv")
except Exception as e:
print("Error:", str(e))
def get_listing_status(access_key):
"""
Get JSON data from Alpha Vantage.
Parameters:
- function (string) : time series of the data you want to enquire e.g. 'TIME_SERIES_WEEKLY_ADJUSTED', 'TIME_SERIES_DAILY'
- symbol (string) : symbol of the equity you want to enquire e.g. 'IBM'
- access_key (string) : access key to Alpha Vantage
Return:
alpha_data_dict (dictionary): JSON data from Alpha Vantage as a dictionary
"""
# set params for alpha vantage API
params = {
"function": "LISTING_STATUS",
"apikey": access_key, # Replace with your Alpha Vantage API key
}
# if per minute limit has been reached, update download_progress.csv and sleep for SLEEPTIMER seconds
if daily_counter % ALPHA_MIN_LIMIT == 0:
update_progress_csv(download_progress_df) # update download_progress.csv
time.sleep(SLEEPTIMER) # sleep for SLEEPTIMER seconds
# get json from Alpha Vantage
try:
response = requests.get(ENDPOINT, params=params)
print(response)
data = response.content.decode("utf-8")
listing_status = pd.read_csv(io.StringIO(data))
except Exception as e:
print("Error:", str(e))
listing_status = {}
return listing_status
def update_progress(progress_df, symbol, status):
"""
Update download progress of a symbol
Parameters:
- progress_df (DataFrame) : DataFrame which records the download status of each symbol
- symbol (string) : symbol of the equity you want to enquire e.g. 'IBM'
- status (string) : download status of the symbol i.e. 'Not Started' / 'Downloaded' / 'Failed'
"""
progress_df.loc[progress_df["Symbol"] == symbol, "Status"] = status
def update_progress_csv(progress_df):
"""
Update download progress csv file with DataFrame
Parameters:
- progress_df (DataFrame) : DataFrame which records the download status of each symbol
"""
progress_df.to_csv(PROGRESS, index=False)
def alpha_dict_to_csv(alpha_data_dict, symbol, function_key):
    """
    Export data from Alpha Vantage to csv files: append the Meta Data (Symbol, Last Refreshed date and Time Zone) of the symbol to meta.csv,
    and write the time series data to a csv file named after the symbol
Parameters:
- alpha_data_dict (dictionary) : dictionary which contains data from Alpha Vantage
- symbol (string) : symbol of the equity you want to enquire e.g. 'IBM'
- function_key (string) : key of the data which contain the time series you enquire e.g. 'Weekly Adjusted Time Series', 'Time Series (Daily)'
"""
meta_data = {
"Symbol": alpha_data_dict["Meta Data"]["2. Symbol"],
"Last_Refreshed": alpha_data_dict["Meta Data"]["3. Last Refreshed"],
"Time_Zone": alpha_data_dict["Meta Data"]["4. Time Zone"],
}
meta_df = pd.DataFrame(meta_data, index=[0])
meta_df.to_csv(METADATA, mode="a", header=False, index=False)
    rows = []
    for date, values in alpha_data_dict[function_key].items():
        row = {
            "Date": date,
            "Open": values["1. open"],
            "High": values["2. high"],
            "Low": values["3. low"],
            "Close": values["4. close"],
            "Adjusted_Close": values["5. adjusted close"],
            "Volume": values["6. volume"],
            "Dividend_Amount": values.get("7. dividend amount", 0),
        }
        rows.append(row)
    # build the DataFrame in one go (DataFrame.append was removed in pandas 2.0)
    alpha_data_df = pd.DataFrame(
        rows,
        columns=[
            "Date",
            "Open",
            "High",
            "Low",
            "Close",
            "Adjusted_Close",
            "Volume",
            "Dividend_Amount",
        ],
    )
alpha_data_df["Date"] = pd.to_datetime(alpha_data_df["Date"])
# Convert columns to float types
alpha_data_df[float_cols] = alpha_data_df[float_cols].astype(float)
# export the dataframe to a csv file
alpha_data_df.to_csv(symbol + ".csv", index=False)
daily_counter = 1 # counter of daily requests
# Load download progress if progress file exist, otherwise create a new dataframe for recording progress
if os.path.isfile(PROGRESS):
download_progress_df = pd.read_csv(PROGRESS)
else:
# full_tickers_df = pd.read_csv('/kaggle/input/nasdaq20230311/nasdaq_screener_1678529494978.csv', keep_default_na=False, na_values=[''])
# download_progress_df = pd.DataFrame({'Symbol' : full_tickers_df['Symbol'], 'Status' : 'Not Started'})
full_tickers_df = get_listing_status(ALPHA_ACCESS)
download_progress_df = pd.DataFrame(
{"Symbol": full_tickers_df["symbol"], "Status": "Not Started"}
)
float_cols = [
"Open",
"High",
"Low",
"Close",
"Adjusted_Close",
"Volume",
"Dividend_Amount",
]
# list of symbols which haven't been downloaded yet
undownloaded_df = download_progress_df[download_progress_df["Status"] == "Not Started"]
# get time series weekly adjusted data of each ticker from Alpha Vantage to csv files
for symbol in undownloaded_df.Symbol:
# if daily limit not reached yet
if daily_counter <= ALPHA_DAILY_LIMIT:
get_data_from_alpha_vantage("TIME_SERIES_DAILY_ADJUSTED", symbol, ALPHA_ACCESS)
# if len(alpha_data_dict) > 0:
try:
# alpha_dict_to_csv(alpha_data_dict, 'Weekly Adjusted Time Series')
update_progress(download_progress_df, symbol, "Downloaded")
except Exception as e:
update_progress(download_progress_df, symbol, "Failed")
print(e, symbol)
# else:
# update_progress(download_progress_df, symbol, "Failed")
daily_counter += 1
else:
        # update download_progress.csv and break the for loop
# update_progress_csv(download_progress_df)
break
update_progress_csv(download_progress_df)
# if data of all tickers have been downloaded
if download_progress_df[download_progress_df["Status"] == "Not Started"].empty:
print("All downloaded !")
"""
download_progress_old = pd.read_csv(PROGRESS)
daily_counter = 1
full_tickers_new = pd.read_csv('https://www.alphavantage.co/query?function=LISTING_STATUS&apikey='+ALPHA_ACCESS)
download_progress_new = pd.DataFrame({'Symbol' : full_tickers_new['symbol'], 'Status' : 'Not Started'})
downloaded = download_progress_old.loc[download_progress_old['Status'] == 'Downloaded']
for index, row in downloaded.iterrows():
# Check if the Symbol is in df_old
if row['Symbol'] in download_progress_new['Symbol'].values:
# Update the Status column in df_new based on the value in df_old
download_progress_new.loc[download_progress_new['Symbol'] == row['Symbol'], 'Status'] = 'Downloaded'
"""
# remove files from working folder
#!rm -rf /kaggle/working/*
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# ## Reading and understanding the data
df = pd.read_csv(
"../input/demonetization-in-india-twitter-data/demonetization-tweets.csv",
encoding="ISO-8859-1",
)
df.head()
df.shape
# ## Cleaning the tweets
df = df["text"]
df = pd.DataFrame({"tweet": df})
df.head()
# Removing few characters
df["cleaned_tweet"] = df["tweet"].replace(
r'\'|\"|\,|\.|\?|\+|\-|\/|\=|\(|\)|\n|"', "", regex=True
)
# Replace runs of two or more spaces with a single space
df["cleaned_tweet"] = df["cleaned_tweet"].replace(r" {2,}", " ", regex=True)
# remove emoticons form the tweets
df["cleaned_tweet"] = df["cleaned_tweet"].replace(r"<ed>", "", regex=True)
df["cleaned_tweet"] = df["cleaned_tweet"].replace(
r"\B<U+.*>|<U+.*>\B|<U+.*>", "", regex=True
)
# convert tweets to lowercase
df["cleaned_tweet"] = df["cleaned_tweet"].str.lower()
# remove user mentions
df["cleaned_tweet"] = df["cleaned_tweet"].replace(r"^(@\w+)", "", regex=True)
# remove 'rt' in the beginning
df["cleaned_tweet"] = df["cleaned_tweet"].replace(r"^(rt @)", "", regex=True)
# remove_symbols
df["cleaned_tweet"] = df["cleaned_tweet"].replace(r"[^a-zA-Z0-9]", " ", regex=True)
# remove punctuations
df["cleaned_tweet"] = df["cleaned_tweet"].replace(
r'[[]!"#$%\'()\*+,-./:;<=>?^_`{|}]+', "", regex=True
)
# remove_URL(x):
df["cleaned_tweet"] = df["cleaned_tweet"].replace(r"https.*$", "", regex=True)
# remove 'amp' in the text
df["cleaned_tweet"] = df["cleaned_tweet"].replace(r"amp", "", regex=True)
# remove words of length 1 or 2
df["cleaned_tweet"] = df["cleaned_tweet"].replace(r"\b[a-zA-Z]{1,2}\b", "", regex=True)
# remove extra spaces in the tweet
df["cleaned_tweet"] = df["cleaned_tweet"].replace(r"^\s+|\s+$", " ", regex=True)
# Now we have the cleaned_tweet. But the stop words are still present. We can use this cleaned_tweet for creating phrases and ranking the phrases from the tweets.
# We will also remove the stop words and store the result in a fully_cleaned_tweet column. This will be used for clustering the sentiments.
# list of words to remove
words_to_remove = [
"ax",
"i",
"you",
"edu",
"s",
"t",
"m",
"subject",
"can",
"lines",
"re",
"what",
"there",
"all",
"we",
"one",
"the",
"a",
"an",
"of",
"or",
"in",
"for",
"by",
"on",
"but",
"is",
"in",
"a",
"not",
"with",
"as",
"was",
"if",
"they",
"are",
"this",
"and",
"it",
"have",
"has",
"from",
"at",
"my",
"be",
"by",
"not",
"that",
"to",
"from",
"com",
"org",
"like",
"likes",
"so",
"said",
"from",
"what",
"told",
"over",
"more",
"other",
"have",
"last",
"with",
"this",
"that",
"such",
"when",
"been",
"says",
"will",
"also",
"where",
"why",
"would",
"today",
"in",
"on",
"you",
"r",
"d",
"u",
"hw",
"wat",
"oly",
"s",
"b",
"ht",
"rt",
"p",
"the",
"th",
"n",
"was",
"via",
]
# remove stopwords and words_to_remove
nltk.download("stopwords", quiet=True)  # make sure the stopwords corpus is available
stop_words = set(stopwords.words("english"))
mystopwords = stop_words.union(words_to_remove)
df["fully_cleaned_tweet"] = df["cleaned_tweet"].apply(
lambda x: " ".join([word for word in x.split() if word not in mystopwords])
)
df.head()
|
# This code is for anyone who wants to try image classification.
# Turning the waveforms into spectrograms lets us treat them as images,
# so we feed the motion image and the music image into a model and
# train it with label 1 if they correspond and 0 if they don't.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
df = pd.read_csv("/kaggle/input/data-science-osaka-spring-2023/train.csv")
df["label"] = 1
df["fold"] = -1
kf = KFold(n_splits=3, shuffle=True, random_state=0)
for fold, (_, valid_idx) in enumerate(kf.split(df)):
df.loc[valid_idx, "fold"] = fold
print(df["fold"].value_counts())
import librosa
idx = 0
motion_id = df["ID"].values[idx]
music_id = df["music"].values[idx]
motion = pd.read_csv(
f"/kaggle/input/data-science-osaka-spring-2023/motion/motion/{motion_id}.csv"
)[["GYRO_X", "GYRO_Y", "GYRO_Z"]].values
wave, sr = librosa.load(
f"/kaggle/input/data-science-osaka-spring-2023/music/music/{music_id}.wav"
)
print(motion.shape, len(wave), sr)
# CQT is said to pick up features well even at low frequencies.
# The music is converted to a spectrogram with librosa.
import torch
from nnAudio.Spectrogram import CQT1992v2
qtransform = CQT1992v2(sr=32, fmin=1, fmax=16, hop_length=16, trainable=False)
image = qtransform(torch.from_numpy(motion).float().permute(1, 0))
plt.figure(figsize=(20, 4))
for i, img in enumerate(image):
plt.subplot(1, 3, i + 1)
plt.imshow(img)
plt.show()
melspec = librosa.amplitude_to_db(np.abs(librosa.stft(wave)))
plt.imshow(melspec)
plt.show()
# This is how we turn the waveforms into images.
# # Dataset
# If you plan to do deep learning, you will write Datasets like the one below over and over, so it is good practice.
# __init__, __len__ and __getitem__ are required.
# __init__: initialization; define the data and any other attributes here.
# __len__: the length of the dataset.
# __getitem__: fetch one sample, treating idx as the row number.
import torchvision
from torch.utils.data import Dataset, DataLoader
class CustomData(Dataset):
def __init__(self, data, qtransform):
self.data = data
self.qtransform = qtransform
def __len__(self):
return len(self.data)
def normalize(self, x):
_mean = x.mean()
_std = x.std()
x = (x - _mean) / _std
_max = x.max()
_min = x.min()
x = (x - _min) / (_max - _min)
return x
def __getitem__(self, idx):
motion_id = self.data["ID"].values[idx]
music_id = self.data["music"].values[idx]
motion = pd.read_csv(
f"/kaggle/input/data-science-osaka-spring-2023/motion/motion/{motion_id}.csv"
)[["GYRO_X", "GYRO_Y", "GYRO_Z"]].values
motion = motion[-1000:, :]
wave, sr = librosa.load(
f"/kaggle/input/data-science-osaka-spring-2023/music/music/{music_id}.wav"
)
motion_image = self.qtransform(torch.from_numpy(motion).float().permute(1, 0))
motion_image = self.normalize(motion_image)
music_image = librosa.amplitude_to_db(np.abs(librosa.stft(wave)))
music_image = torchvision.transforms.Resize([256, 256])(
torch.from_numpy(music_image).unsqueeze(0)
)
music_image = self.normalize(music_image)
label = self.data["label"].values[idx]
label = torch.tensor(label, dtype=torch.float)
return motion_image, music_image, label
qtransform = CQT1992v2(sr=32, fmin=1, fmax=16, hop_length=16, trainable=False)
ds = CustomData(df, qtransform)
motion, music, label = ds[0]
print(motion.shape, music.shape, label)
# Each sample is a motion image (3 x H x W), a music image (1 x H x W), and a label.
# # Model
# timm makes it easy to define strong models.
# Since no external data is allowed, pretrained=False.
# The motion features and the music features are concatenated with cat and classified by the final head.
import timm
import torch.nn as nn
class CustomModel(nn.Module):
def __init__(self):
super().__init__()
self.motion_model = timm.create_model(
"efficientnetv2_s", pretrained=False, in_chans=3, num_classes=0
)
self.music_model = timm.create_model(
"efficientnet_b0", pretrained=False, in_chans=1, num_classes=0
)
self.head = nn.Sequential(
nn.Linear(
in_features=self.motion_model.num_features
+ self.music_model.num_features,
out_features=128,
),
nn.BatchNorm1d(128),
nn.Dropout(p=0.1),
nn.ReLU(),
nn.Linear(in_features=128, out_features=1),
)
def forward(self, motion, music):
x1 = self.motion_model(motion)
x2 = self.music_model(music)
logits = self.head(torch.cat([x1, x2], dim=-1))
return logits
model = CustomModel()
dl = DataLoader(ds, batch_size=4)
b = next(iter(dl))
print(b[0].shape, b[1].shape, b[2].shape)
# batch_size x 3 x H x W, batch_size x 1 x H x W, batch_size
logits = model(b[0], b[1])
print(logits.shape)
# Feeding a batch into the model gives predictions with shape batch_size x 1.
# # Training
fold = 0
train = df.loc[df.fold != fold].reset_index(drop=True)
valid = df.loc[df.fold == fold].reset_index(drop=True)
train_musics = train["music"].unique()
valid_musics = valid["music"].unique()
print(len(train_musics), len(valid_musics))
train = pd.merge(
pd.DataFrame(
[(i, m) for i in train["ID"] for m in train_musics], columns=["ID", "music"]
),
train[["ID", "music", "label"]],
how="left",
on=["ID", "music"],
).fillna(0)
valid = pd.merge(
pd.DataFrame(
[(i, m) for i in valid["ID"] for m in valid_musics], columns=["ID", "music"]
),
valid[["ID", "music", "label"]],
how="left",
on=["ID", "music"],
).fillna(0)
print(train.shape, valid.shape)
# The loop below alternates training and validation.
# Most of it is a common pattern you will see everywhere,
# so if you are new to this, writing it yourself (with some googling) is good practice.
from timm.utils import AverageMeter
from tqdm.notebook import tqdm, trange
train_ds = CustomData(train, qtransform)
valid_ds = CustomData(valid, qtransform)
train_dl = DataLoader(
train_ds,
batch_size=32,
shuffle=True,
drop_last=True,
pin_memory=True,
num_workers=2,
)
valid_dl = DataLoader(
valid_ds,
batch_size=32 * 2,
shuffle=False,
drop_last=False,
pin_memory=True,
num_workers=2,
)
model = CustomModel().cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
criterion = nn.BCEWithLogitsLoss()
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=10, eta_min=1e-6, verbose=True
)
history = {"train": [], "valid": []}
for e in range(10):
print(f"\n- Epoch: {e} -")
model.train()
train_loss = AverageMeter()
pbar = tqdm(train_dl)
for b in pbar:
motion = b[0].cuda()
music = b[1].cuda()
label = b[2].cuda()
logits = model(motion, music)
loss = criterion(logits.reshape(-1, 1), label.reshape(-1, 1))
loss.backward()
optimizer.step()
optimizer.zero_grad()
train_loss.update(val=loss.item(), n=len(motion))
pbar.set_postfix(TrainLoss=train_loss.avg)
scheduler.step()
model.eval()
valid_loss = AverageMeter()
with torch.no_grad():
for b in tqdm(valid_dl):
motion = b[0].cuda()
music = b[1].cuda()
label = b[2].cuda()
logits = model(motion, music)
loss = criterion(logits.reshape(-1, 1), label.reshape(-1, 1))
valid_loss.update(val=loss.item(), n=len(motion))
print("TrainLoss:", train_loss.avg)
print("ValidLoss:", valid_loss.avg)
history["train"].append(train_loss.avg)
history["valid"].append(valid_loss.avg)
plt.plot(history["train"])
plt.plot(history["valid"])
plt.legend(["train", "valid"])
plt.show()
# # Evaluation
# We check AUC and MAP@7.
# Prediction is written almost the same way as validation; we simply collect the model outputs.
preds = []
model.eval()
with torch.no_grad():
for b in tqdm(valid_dl):
logits = model(b[0].cuda(), b[1].cuda())
preds += logits.sigmoid().cpu().reshape(-1).tolist()
valid["prob"] = preds
valid.head()
from sklearn.metrics import roc_auc_score
auc = roc_auc_score(valid["label"], valid["prob"])
print("AUC:", auc)
oof = valid.sort_values(by=["ID", "prob"], ascending=[True, False])
oof = (
oof.groupby("ID")["music"]
.agg(list)
.reset_index()
.rename(columns={"music": "preds"})
)
oof = oof.merge(df[["ID", "music"]], how="left", on="ID")
oof.head()
def map_per_row(label, predictions):
try:
return 1 / (predictions[:7].index(label) + 1)
except ValueError:
return 0.0
def calc_metric(labels, predictions):
return np.mean([map_per_row(l, p) for l, p in zip(labels, predictions)])
score = calc_metric(oof["music"], oof["preds"])
print("MAP:", score)
# # Inference
test_musics = [i for i in range(250) if i not in sorted(df["music"].unique())]
test = pd.DataFrame(
[(i, m) for i in np.arange(100, 250) for m in test_musics], columns=["ID", "music"]
)
test["label"] = -1
test_ds = CustomData(test, qtransform)
test_dl = DataLoader(
test_ds,
batch_size=32 * 2,
shuffle=False,
drop_last=False,
pin_memory=True,
num_workers=2,
)
preds = []
model.eval()
with torch.no_grad():
for b in tqdm(test_dl):
logits = model(b[0].cuda(), b[1].cuda())
preds += logits.sigmoid().cpu().reshape(-1).tolist()
test["prob"] = preds
submit = test.sort_values(by=["ID", "prob"], ascending=[True, False])
submit = submit.groupby("ID")["music"].agg(list).reset_index()
submit["music"] = submit["music"].apply(lambda x: " ".join([str(i) for i in x][:7]))
submit.to_csv("submission.csv", index=False)
submit.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv")
df = pd.concat([test, train])
print(df.head(10))
df = df.reset_index(drop=True)
# 1459 + 1460
# # Detecting missing data
# The **isna** function makes it easy to find missing values
data = []
j = 0
for i in train:
data.append(df.iloc[:, j : j + 1].isna().sum())
# print(data[j])
j += 1
# let's identify the columns that have missing data
# columns with missing data / number of missing values:
# LotFrontage 259
# Alley 1369
# MasVnrType 8
# MasVnrArea 8
# BsmtQual 37
# BsmtCond 37
# BsmtExposure 38
# BsmtFinType1 37
# BsmtFinType2 38
# Electrical 1
# FireplaceQu 690
# GarageType 81
# GarageYrBlt 81
# GarageFinish 81
# GarageQual 81
# GarageCond 81
# PoolQC 1453
# Fence 1179
# MiscFeature 1406
#
#
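# A shorter way to list only the columns that still contain missing values (a sketch using
# plain pandas on the combined df; counts here cover both train and test rows):
missing_counts = df.isna().sum()
print(missing_counts[missing_counts > 0].sort_values(ascending=False))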
# # Visualizing missing data
# Missing values can be visualized with the missingno library
import missingno as msno
msno.bar(df)
# # Deleting missing data
# Each column has 1460 values. We decided not to use columns with a missing-value ratio above 40% in the model, so we say goodbye here to the Alley, FireplaceQu, PoolQC, Fence and MiscFeature columns. We will try to fill the remaining columns with appropriate techniques :)
total_data = df.shape[0]
i = 0
while i < df.shape[1]:
    missing_pct = (df.iloc[:, i].isna().sum() / total_data) * 100
    if missing_pct > 40:
        # drop the column and stay at the same index, because the next column shifts into it
        df = df.drop(columns=df.columns[i])
    else:
        i += 1
# print(df.head(10))
# With this loop you can easily drop any column whose missing-value ratio exceeds a chosen threshold.
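# A more compact alternative (a sketch with the same 40% threshold): keep only the columns
# whose missing-value ratio is at most 40%. It builds a copy instead of modifying df.
df_compact = df.loc[:, df.isna().mean() * 100 <= 40]
print(df_compact.shape)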
# # Methods for filling missing data
# Types of missing data: missing completely at random, missing at random, missing not at random.
# # fillna
# df.iloc[:,3:4].fillna("sample")
# Missing values can be filled with any desired value using the fillna function, as in the commented example above
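# A minimal fillna sketch (column names taken from the missing-value list above; "None" and
# the column mean are just illustrative choices), done on a copy so df itself stays untouched:
df_fill_example = df.copy()
df_fill_example["MasVnrType"] = df_fill_example["MasVnrType"].fillna("None")
df_fill_example["MasVnrArea"] = df_fill_example["MasVnrArea"].fillna(
    df_fill_example["MasVnrArea"].mean()
)
print(df_fill_example[["MasVnrType", "MasVnrArea"]].isna().sum())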
# # SimpleImputer
data = df["LotFrontage"]
print("data before modeling")
print(data.head(30))
data = data.values
data = data.reshape(-1, 1)
from sklearn.impute import SimpleImputer
fea_transformer = SimpleImputer(strategy="median")
values = fea_transformer.fit_transform(data)
data = pd.DataFrame(values)
print("data after modeling")
print(data.head(30))
# Missing values can also be filled with **SimpleImputer**, which fills them with the *mean*, *median* or *most_frequent* (mode) value of the column. In the example above, the LotFrontage column was filled with its median.
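# A short sketch of the most_frequent (mode) strategy on a categorical column from the
# missing-value list above (Electrical); SimpleImputer accepts object columns with this strategy.
cat_imputer = SimpleImputer(strategy="most_frequent")
electrical_filled = pd.DataFrame(
    cat_imputer.fit_transform(df[["Electrical"]]), columns=["Electrical"]
)
print(electrical_filled.isna().sum())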
# # Filling missing data with the KNN algorithm
data = df["LotFrontage"]
print("data before modeling")
print(data.head(30))
data = data.values
data = data.reshape(-1, 1)
from sklearn.impute import KNNImputer
fea_transformer = KNNImputer(n_neighbors=3)
values = fea_transformer.fit_transform(data)
data = pd.DataFrame(values)
print("data after modeling")
print(data.head(30))
# Missing values can also be filled with machine learning models. Above, we filled the LotFrontage column using the KNN algorithm.
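# A sketch of multi-feature KNN imputation (numeric columns taken from the missing-value list
# above): giving the imputer several related columns lets it pick genuinely similar rows,
# instead of working on a single column in isolation as in the example above.
knn_cols = ["LotFrontage", "MasVnrArea", "GarageYrBlt"]
knn_imputer = KNNImputer(n_neighbors=3)
knn_filled = pd.DataFrame(knn_imputer.fit_transform(df[knn_cols]), columns=knn_cols)
print(knn_filled.isna().sum())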
# # Choosing a method
# Now that we know a few imputation methods, we need to decide which method to use for which column
df = df.fillna(df.mode().iloc[0])
msno.bar(df)
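# A sketch of a per-column strategy as an alternative to the blanket mode fill above (it would
# replace that single fillna call rather than follow it): numeric columns get their median,
# categorical columns get their mode.
numeric_cols = df.select_dtypes(include="number").columns
categorical_cols = df.select_dtypes(exclude="number").columns
df_alt = df.copy()
df_alt[numeric_cols] = df_alt[numeric_cols].fillna(df_alt[numeric_cols].median())
df_alt[categorical_cols] = df_alt[categorical_cols].fillna(df_alt[categorical_cols].mode().iloc[0])
print(df_alt.isna().sum().sum())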
|